diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst index 463cf7e73db80b3bf3f8a36afe91bcf3d7f0d429..7af83a92d2d6103ea3fb0a3a0425e994b29f7f53 100644 --- a/Documentation/admin-guide/pm/cpufreq.rst +++ b/Documentation/admin-guide/pm/cpufreq.rst @@ -237,6 +237,14 @@ are the following: This attribute is not present if the scaling driver in use does not support it. +``cpuinfo_cur_freq`` + Current frequency of the CPUs belonging to this policy as obtained from + the hardware (in KHz). + + This is expected to be the frequency the hardware actually runs at. + If that frequency cannot be determined, this attribute should not + be present. + ``cpuinfo_max_freq`` Maximum possible operating frequency the CPUs belonging to this policy can run at (in kHz). diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 7e06e65586d4ae110262a4dc8c0eef62d43d9dd5..4a0a7469fdd7bbcd93e26ea5ae9c11e4285f7c7f 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -343,3 +343,4 @@ Version History 1.11.0 Fix table line argument order (wrong raid10_copies/raid10_format sequence) 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option +1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt index 0764f9ab63dcde31f7efff01f74e56c5c6fa1c56..e20eac7a30874f0db82adefa0cdf3a77ac09aec2 100644 --- a/Documentation/devicetree/bindings/ata/sata_rcar.txt +++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt @@ -1,14 +1,22 @@ * Renesas R-Car SATA Required properties: -- compatible : should contain one of the following: +- compatible : should contain one or more of the following: - "renesas,sata-r8a7779" for R-Car H1 - ("renesas,rcar-sata" is deprecated) - "renesas,sata-r8a7790-es1" for R-Car H2 ES1 - "renesas,sata-r8a7790" for R-Car H2 other than ES1 - "renesas,sata-r8a7791" for R-Car M2-W - "renesas,sata-r8a7793" for R-Car M2-N - "renesas,sata-r8a7795" for R-Car H3 + - "renesas,rcar-gen2-sata" for a generic R-Car Gen2 compatible device + - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 compatible device + - "renesas,rcar-sata" is deprecated + + When compatible with the generic version nodes + must list the SoC-specific version corresponding + to the platform first followed by the generic + version. + - reg : address and length of the SATA registers; - interrupts : must consist of one interrupt specifier. - clocks : must contain a reference to the functional clock. @@ -16,7 +24,7 @@ Required properties: Example: sata0: sata@ee300000 { - compatible = "renesas,sata-r8a7791"; + compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata"; reg = <0 0xee300000 0 0x2000>; interrupt-parent = <&gic>; interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>; diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt index f69773f4252bf01bb9948767b254bd7bd190a41a..941bb6a6fb1304609bb3fa8a94520bfbdcca06f7 100644 --- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt +++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt @@ -8,7 +8,6 @@ Required properties: Optional properties: - clocks: Reference to the crypto engine clock. -- dma-mask: The address mask limitation. Defaults to 64. 
Example: @@ -24,6 +23,5 @@ Example: interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", "eip"; clocks = <&cpm_syscon0 1 26>; - dma-mask = <0xff 0xffffffff>; status = "disabled"; }; diff --git a/Documentation/devicetree/bindings/gpio/gpio-exar.txt b/Documentation/devicetree/bindings/gpio/gpio-exar.txt new file mode 100644 index 0000000000000000000000000000000000000000..4540d61824af8a3d9cccb33398876570d66fd6ab --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-exar.txt @@ -0,0 +1,5 @@ +Exportable MPIO interface of Exar UART chips + +Required properties of the device: + - exar,first-pin: first exportable pins (0..15) + - ngpios: number of exportable pins (1..16) diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt index d3b6e1a4713a58d00692ab8bf2ae74f1956a40e3..5aa5926029ee7286c4cd2e41a446574c13102021 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt @@ -40,7 +40,7 @@ Optional properties: Example for a Mali-T760: gpu@ffa30000 { - compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard"; + compatible = "rockchip,rk3288-mali", "arm,mali-t760"; reg = <0xffa30000 0x10000>; interrupts = , , diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt index aad98442788bc386366b019a6166e6145b2f0017..a58c173b7ab9882091fe26e378ce655610f17455 100644 --- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt @@ -78,7 +78,6 @@ Example: }; dwmmc0@12200000 { - num-slots = <1>; cap-mmc-highspeed; cap-sd-highspeed; broken-cd; diff --git a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt index 85de99fcaa2fa14c146f768431978b089c674d54..c54e577eea0703c498d5fb397bd1d509f03faaa4 100644 --- a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt @@ -24,6 +24,5 @@ Example: fifo-depth = <0x20>; bus-width = <4>; - num-slots = <1>; disable-wp; }; diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt index 8af1afcb86dcf8c375aec37e99103ad73ddf2551..07242d1417735c56576bc59742284957259a4660 100644 --- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt @@ -36,7 +36,6 @@ Example: /* Board portion */ dwmmc0@fcd03000 { - num-slots = <1>; vmmc-supply = <&ldo12>; fifo-depth = <0x100>; pinctrl-names = "default"; @@ -52,7 +51,6 @@ Example: dwmmc_1: dwmmc1@f723e000 { compatible = "hisilicon,hi6220-dw-mshc"; - num-slots = <0x1>; bus-width = <0x4>; disable-wp; cap-sd-highspeed; diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt index 9cb55ca5746135a104d9e6d4722b87d888be63af..ef3e5f14067a17b91abf9715e98d56ad4bb220dd 100644 --- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt @@ -12,12 +12,12 @@ Required Properties: * #address-cells: should be 1. * #size-cells: should be 0. -# Slots: The slot specific information are contained within child-nodes with - each child-node representing a supported slot. There should be atleast one - child node representing a card slot. 
The name of the child node representing - the slot is recommended to be slot@n where n is the unique number of the slot - connected to the controller. The following are optional properties which - can be included in the slot child node. +# Slots (DEPRECATED): The slot specific information are contained within + child-nodes with each child-node representing a supported slot. There should + be atleast one child node representing a card slot. The name of the child node + representing the slot is recommended to be slot@n where n is the unique number + of the slot connected to the controller. The following are optional properties + which can be included in the slot child node. * reg: specifies the physical slot number. The valid values of this property is 0 to (num-slots -1), where num-slots is the value @@ -63,7 +63,7 @@ Optional properties: clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default. (Use the "max-frequency" instead of "clock-freq-min-max".) -* num-slots: specifies the number of slots supported by the controller. +* num-slots (DEPRECATED): specifies the number of slots supported by the controller. The number of physical slots actually used could be equal or less than the value specified by num-slots. If this property is not specified, the value of num-slot property is assumed to be 1. @@ -124,7 +124,6 @@ board specific portions as listed below. dwmmc0@12200000 { clock-frequency = <400000000>; clock-freq-min-max = <400000 200000000>; - num-slots = <1>; broken-cd; fifo-depth = <0x80>; card-detect-delay = <200>; @@ -139,7 +138,6 @@ board specific portions as listed below. dwmmc0@12200000 { clock-frequency = <400000000>; clock-freq-min-max = <400000 200000000>; - num-slots = <1>; broken-cd; fifo-depth = <0x80>; card-detect-delay = <200>; diff --git a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt index eaade0e5adeb09e8d1c2e25763cf90d7bb3c06c7..906819a90c2bb6232f82f1549ea563c58132aec4 100644 --- a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt @@ -25,7 +25,6 @@ Example: clock-frequency = <50000000>; clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>; clock-names = "biu", "ciu"; - num-slots = <1>; max-frequency = <50000000>; cap-sdio-irq; cap-sd-highspeed; diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt index a59916c29b3312cd4946a1d9a8da2331819e7845..1a85c1bdaf38a9ae7fb8b6555afc30abae661a20 100644 --- a/Documentation/fb/efifb.txt +++ b/Documentation/fb/efifb.txt @@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf: Macbook Pro 17", iMac 20" : video=efifb:i20 +Accepted options: + +nowc Don't map the framebuffer write combined. This can be used + to workaround side-effects and slowdowns on other CPU cores + when large amounts of console data are written. + -- Edgar Hucek diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt index b34fd94f70898a7f65c2a0313349588411eb8e81..5eacc147ea870c80bb06c38d43bd5b662c171194 100644 --- a/Documentation/gpio/gpio-legacy.txt +++ b/Documentation/gpio/gpio-legacy.txt @@ -459,7 +459,7 @@ pin controller? This is done by registering "ranges" of pins, which are essentially cross-reference tables. These are described in -Documentation/pinctrl.txt +Documentation/driver-api/pinctl.rst While the pin allocation is totally managed by the pinctrl subsystem, gpio (under gpiolib) is still maintained by gpio drivers. 
It may happen
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index ff86bf0abeae9624c7dd2ae898bc4af464ddc6f9..de9a228aca8a862ff585b374a498bbc9d103ef1c 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -1,6 +1,31 @@
 Digital TV (DVB) devices
 ------------------------
 
+Digital TV devices are implemented by several different drivers:
+
+- A bridge driver that is responsible for talking with the bus where the other
+  devices are connected (PCI, USB, SPI), binding to the other drivers and
+  implementing the digital demux logic (either in software or in hardware);
+
+- Frontend drivers that are usually implemented as two separate drivers:
+
+  - A tuner driver that implements the logic which commands the part of the
+    hardware which is responsible for tuning into a digital TV transponder or
+    physical channel. The output of a tuner is usually a baseband or
+    Intermediate Frequency (IF) signal;
+
+  - A demodulator driver (a.k.a. "demod") that implements the logic which
+    commands the digital TV decoding hardware. The output of a demod is
+    a digital stream, with multiple audio, video and data channels typically
+    multiplexed using MPEG Transport Stream [#f1]_.
+
+On most hardware, the frontend drivers talk with the bridge driver using an
+I2C bus.
+
+.. [#f1] Some standards use TCP/IP for multiplexing data, like DVB-H (an
+   abandoned standard, not used anymore) and current ATSC version 3.0
+   proposals. Currently, the DVB subsystem doesn't implement those standards.
+
 Digital TV Common functions
 ---------------------------
 
@@ -55,8 +80,141 @@ Digital TV Frontend
 The Digital TV Frontend kABI defines a driver-internal interface for
 registering low-level, hardware specific driver to a hardware independent
 frontend layer. It is only of interest for Digital TV device driver writers.
-The header file for this API is named dvb_frontend.h and located in
-drivers/media/dvb-core.
+The header file for this API is named ``dvb_frontend.h`` and located in
+``drivers/media/dvb-core``.
+
+Demodulator driver
+^^^^^^^^^^^^^^^^^^
+
+The demodulator driver is responsible for talking with the decoding part of
+the hardware. Such a driver should implement :c:type:`dvb_frontend_ops`,
+which tells which digital TV standards are supported, and points to a
+series of functions that allow the DVB core to command the hardware via
+the code under ``drivers/media/dvb-core/dvb_frontend.c``.
+
+A typical example of such a struct in a driver ``foo`` is::
+
+	static struct dvb_frontend_ops foo_ops = {
+		.delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
+		.info = {
+			.name = "foo DVB-T/T2/C driver",
+			.caps = FE_CAN_FEC_1_2 |
+				FE_CAN_FEC_2_3 |
+				FE_CAN_FEC_3_4 |
+				FE_CAN_FEC_5_6 |
+				FE_CAN_FEC_7_8 |
+				FE_CAN_FEC_AUTO |
+				FE_CAN_QPSK |
+				FE_CAN_QAM_16 |
+				FE_CAN_QAM_32 |
+				FE_CAN_QAM_64 |
+				FE_CAN_QAM_128 |
+				FE_CAN_QAM_256 |
+				FE_CAN_QAM_AUTO |
+				FE_CAN_TRANSMISSION_MODE_AUTO |
+				FE_CAN_GUARD_INTERVAL_AUTO |
+				FE_CAN_HIERARCHY_AUTO |
+				FE_CAN_MUTE_TS |
+				FE_CAN_2G_MODULATION,
+			.frequency_min = 42000000, /* Hz */
+			.frequency_max = 1002000000, /* Hz */
+			.symbol_rate_min = 870000,
+			.symbol_rate_max = 11700000
+		},
+		.init = foo_init,
+		.sleep = foo_sleep,
+		.release = foo_release,
+		.set_frontend = foo_set_frontend,
+		.get_frontend = foo_get_frontend,
+		.read_status = foo_get_status_and_stats,
+		.tune = foo_tune,
+		.i2c_gate_ctrl = foo_i2c_gate_ctrl,
+		.get_frontend_algo = foo_get_algo,
+	};
+
+A typical example of such a struct in a driver ``bar`` meant to be used on
+Satellite TV reception is::
+
+	static const struct dvb_frontend_ops bar_ops = {
+		.delsys = { SYS_DVBS, SYS_DVBS2 },
+		.info = {
+			.name = "Bar DVB-S/S2 demodulator",
+			.frequency_min = 500000, /* KHz */
+			.frequency_max = 2500000, /* KHz */
+			.frequency_stepsize = 0,
+			.symbol_rate_min = 1000000,
+			.symbol_rate_max = 45000000,
+			.symbol_rate_tolerance = 500,
+			.caps = FE_CAN_INVERSION_AUTO |
+				FE_CAN_FEC_AUTO |
+				FE_CAN_QPSK,
+		},
+		.init = bar_init,
+		.sleep = bar_sleep,
+		.release = bar_release,
+		.set_frontend = bar_set_frontend,
+		.get_frontend = bar_get_frontend,
+		.read_status = bar_get_status_and_stats,
+		.i2c_gate_ctrl = bar_i2c_gate_ctrl,
+		.get_frontend_algo = bar_get_algo,
+		.tune = bar_tune,
+
+		/* Satellite-specific */
+		.diseqc_send_master_cmd = bar_send_diseqc_msg,
+		.diseqc_send_burst = bar_send_burst,
+		.set_tone = bar_set_tone,
+		.set_voltage = bar_set_voltage,
+	};
+
+.. note::
+
+   #) For satellite digital TV standards (DVB-S, DVB-S2, ISDB-S), the
+      frequencies are specified in kHz, while, for terrestrial and cable
+      standards, they're specified in Hz. Due to that, if the same frontend
+      supports both types, you'll need to have two separate
+      :c:type:`dvb_frontend_ops` structures, one for each standard.
+   #) The ``.i2c_gate_ctrl`` field is present only when the hardware allows
+      controlling an I2C gate (either directly or via some GPIO pin), in
+      order to remove the tuner from the I2C bus after a channel is tuned.
+   #) All new drivers should implement the
+      :ref:`DVBv5 statistics <dvbv5_stats>` via ``.read_status``.
+      Yet, there are a number of callbacks meant to get statistics for
+      signal strength, S/N and UCB. Those are there to provide backward
+      compatibility with legacy applications that don't support the DVBv5
+      API. Implementing those callbacks is optional. Those callbacks may be
+      removed in the future, after we have all existing drivers supporting
+      DVBv5 stats.
+   #) Other callbacks are required for satellite TV standards, in order to
+      control LNBf and DiSEqC: ``.diseqc_send_master_cmd``,
+      ``.diseqc_send_burst``, ``.set_tone``, ``.set_voltage``.
+
+.. |delta| unicode:: U+00394
+
+The ``drivers/media/dvb-core/dvb_frontend.c`` file has a kernel thread which
+is responsible for tuning the device. It supports multiple algorithms to
+detect a channel, as defined at enum :c:type:`dvbfe_algo`.
+
+The algorithm to be used is obtained via ``.get_frontend_algo``. If the driver
+doesn't fill this field at struct :c:type:`dvb_frontend_ops`, it will default
+to ``DVBFE_ALGO_SW``, meaning that the dvb-core will do a zigzag when tuning,
+e. g. it will first try to use the specified center frequency ``f``,
+then ``f`` + |delta|, ``f`` - |delta|, ``f`` + 2 x |delta|,
+``f`` - 2 x |delta| and so on.
+
+If the hardware internally has some sort of zigzag algorithm, you should
+define a ``.get_frontend_algo`` function that returns ``DVBFE_ALGO_HW``.
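+
+For instance, the ``foo_get_algo`` callback referenced in the ``foo_ops``
+example above could be as simple as the following sketch (it assumes,
+hypothetically, that the ``foo`` hardware performs the channel search by
+itself)::
+
+	static enum dvbfe_algo foo_get_algo(struct dvb_frontend *fe)
+	{
+		/*
+		 * The hardware does its own frequency sweep, so tell the
+		 * frontend core not to run the software zigzag.
+		 */
+		return DVBFE_ALGO_HW;
+	}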
+
+.. note::
+
+   The frontend core also supports
+   a third type (``DVBFE_ALGO_CUSTOM``), in order to allow the driver to
+   define its own hardware-assisted algorithm. Very few devices need to
+   use it nowadays. Using ``DVBFE_ALGO_CUSTOM`` requires providing other
+   function callbacks at struct :c:type:`dvb_frontend_ops`.
+
+Attaching frontend driver to the bridge driver
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Before using the Digital TV frontend core, the bridge driver
 should attach the frontend demod, tuner and SEC devices and call
@@ -74,6 +232,287 @@ part of their handler for :c:type:`device_driver`.\ ``resume()``.
 
 A few other optional functions are provided to handle some special cases.
 
+.. _dvbv5_stats:
+
+Digital TV Frontend statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Introduction
+^^^^^^^^^^^^
+
+Digital TV frontends provide a range of
+:ref:`statistics <frontend-stat-properties>` meant to help tuning the device
+and measuring the quality of service.
+
+For each statistics measurement, the driver should set the type of scale used,
+or ``FE_SCALE_NOT_AVAILABLE`` if the statistic is not available at a given
+time. Drivers should also provide the number of statistics for each type;
+that's usually 1 for most video standards [#f2]_.
+
+Drivers should initialize each statistics counter with its length and
+scale in their init code. For example, if the frontend provides signal
+strength, it should have, in its init code::
+
+	struct dtv_frontend_properties *c = &state->fe.dtv_property_cache;
+
+	c->strength.len = 1;
+	c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+And, when the statistics get updated, set the scale::
+
+	c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+	c->strength.stat[0].uvalue = strength;
+
+.. [#f2] For ISDB-T, the frontend may provide both global statistics and a
+   per-layer set of statistics. In such cases, len should be equal to 4. The
+   first value corresponds to the global stat; the other ones to each layer,
+   e. g.:
+
+   - c->cnr.stat[0] for global S/N carrier ratio,
+   - c->cnr.stat[1] for Layer A S/N carrier ratio,
+   - c->cnr.stat[2] for layer B S/N carrier ratio,
+   - c->cnr.stat[3] for layer C S/N carrier ratio.
+
+.. note:: Please prefer to use ``FE_SCALE_DECIBEL`` instead of
+   ``FE_SCALE_RELATIVE`` for signal strength and CNR measurements.
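+
+For instance, a hypothetical ISDB-T demod that follows the per-layer layout
+described in the footnote above could initialize its CNR statistics with
+something like the sketch below (reusing the ``state`` variable from the
+previous example)::
+
+	struct dtv_frontend_properties *c = &state->fe.dtv_property_cache;
+	int layer;
+
+	/* One global CNR value plus one value per ISDB-T layer (A, B and C) */
+	c->cnr.len = 4;
+	for (layer = 0; layer < c->cnr.len; layer++)
+		c->cnr.stat[layer].scale = FE_SCALE_NOT_AVAILABLE;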
+
+Groups of statistics
+^^^^^^^^^^^^^^^^^^^^
+
+There are several groups of statistics currently supported:
+
+Signal strength (:ref:`DTV-STAT-SIGNAL-STRENGTH`)
+  - Measures the signal strength level at the analog part of the tuner or
+    demod.
+
+  - Typically obtained from the gain applied to the tuner and/or frontend
+    in order to detect the carrier. When no carrier is detected, the gain is
+    at the maximum value (so, strength is at its minimum).
+
+  - As the gain is visible through the set of registers that adjust the gain,
+    this statistic is typically always available [#f3]_.
+
+  - Drivers should try to make it available at all times, as this statistic
+    can be used when adjusting an antenna position and to check for troubles
+    in the cabling.
+
+  .. [#f3] On a few devices, the gain keeps floating if there is no carrier.
+     On such devices, the strength report should first check whether a
+     carrier is detected at the tuner (``FE_HAS_CARRIER``, see
+     :c:type:`fe_status`), and otherwise return the lowest possible value.
+
+Carrier Signal to Noise ratio (:ref:`DTV-STAT-CNR`)
+  - Signal to Noise ratio for the main carrier.
+
+  - Signal to Noise measurement depends on the device. On some hardware, it
+    is available when the main carrier is detected. On such hardware, CNR
+    measurement usually comes from the tuner (e. g. after ``FE_HAS_CARRIER``,
+    see :c:type:`fe_status`).
+
+    On other devices, it requires inner FEC decoding,
+    as the frontend measures it indirectly from other parameters (e. g. after
+    ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+    Having it available after inner FEC is more common.
+
+Bit counts post-FEC (:ref:`DTV-STAT-POST-ERROR-BIT-COUNT` and :ref:`DTV-STAT-POST-TOTAL-BIT-COUNT`)
+  - Those counters measure the number of bits and bit errors after
+    the forward error correction (FEC) on the inner coding block
+    (after Viterbi, LDPC or other inner code).
+
+  - Due to its nature, those statistics depend on full coding lock
+    (e. g. after ``FE_HAS_SYNC`` or after ``FE_HAS_LOCK``,
+    see :c:type:`fe_status`).
+
+Bit counts pre-FEC (:ref:`DTV-STAT-PRE-ERROR-BIT-COUNT` and :ref:`DTV-STAT-PRE-TOTAL-BIT-COUNT`)
+  - Those counters measure the number of bits and bit errors before
+    the forward error correction (FEC) on the inner coding block
+    (before Viterbi, LDPC or other inner code).
+
+  - Not all frontends provide this kind of statistics.
+
+  - Due to its nature, those statistics depend on inner coding lock (e. g.
+    after ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+Block counts (:ref:`DTV-STAT-ERROR-BLOCK-COUNT` and :ref:`DTV-STAT-TOTAL-BLOCK-COUNT`)
+  - Those counters measure the number of blocks and block errors after
+    the forward error correction (FEC) on the inner coding block
+    (after Viterbi, LDPC or other inner code).
+
+  - Due to its nature, those statistics depend on full coding lock
+    (e. g. after ``FE_HAS_SYNC`` or after
+    ``FE_HAS_LOCK``, see :c:type:`fe_status`).
+
+.. note:: All counters should be monotonically increased as they're
+   collected from the hardware.
+
+A typical example of the logic that handles status and statistics is::
+
+	static int foo_get_status_and_stats(struct dvb_frontend *fe)
+	{
+		struct foo_state *state = fe->demodulator_priv;
+		struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+		int rc;
+		enum fe_status status;
+
+		/* Both status and strength are always available */
+		rc = foo_read_status(fe, &status);
+		if (rc < 0)
+			return rc;
+
+		rc = foo_read_strength(fe);
+		if (rc < 0)
+			return rc;
+
+		/* Check if CNR is available */
+		if (!(status & FE_HAS_CARRIER))
+			return 0;
+
+		rc = foo_read_cnr(fe);
+		if (rc < 0)
+			return rc;
+
+		/* Check if pre-BER stats are available */
+		if (!(status & FE_HAS_VITERBI))
+			return 0;
+
+		rc = foo_get_pre_ber(fe);
+		if (rc < 0)
+			return rc;
+
+		/* Check if post-BER stats are available */
+		if (!(status & FE_HAS_SYNC))
+			return 0;
+
+		rc = foo_get_post_ber(fe);
+		if (rc < 0)
+			return rc;
+
+		return 0;
+	}
+
+	static const struct dvb_frontend_ops ops = {
+		/* ... */
+		.read_status = foo_get_status_and_stats,
+	};
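+
+The ``foo_read_strength()`` call above is not part of the frontend kABI; it
+is just the driver's own code that updates the DVBv5 cache. A hedged sketch
+of what it could look like is shown below; ``foo_hw_get_strength()`` is a
+made-up register-access helper, assumed to return the strength in 0.001 dBm
+units (as expected by ``FE_SCALE_DECIBEL``) or a negative error code::
+
+	static int foo_read_strength(struct dvb_frontend *fe)
+	{
+		struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+		struct foo_state *state = fe->demodulator_priv;
+		s32 strength;
+		int rc;
+
+		rc = foo_hw_get_strength(state, &strength);
+		if (rc < 0) {
+			/* Measurement failed: flag the statistic as unavailable */
+			c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+			return rc;
+		}
+
+		c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+		c->strength.stat[0].svalue = strength;
+
+		return 0;
+	}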
+
+Statistics collection
+^^^^^^^^^^^^^^^^^^^^^
+
+On almost all frontend hardware, the bit and byte counts are stored by
+the hardware after a certain amount of time or after the total bit/block
+counter reaches a certain value (usually programmable), for example, on
+every 1000 ms or after receiving 1,000,000 bits.
+
+So, if you read the registers too soon, you'll end up reading the same
+value as in the previous reading, causing the monotonic value to be
+incremented too often.
+
+Drivers should take care not to read those registers too often. That
+can be done using one of two approaches:
+
+If the driver has a bit that indicates when the collected data is ready
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+The driver should check such a bit before making the statistics available.
+
+An example of such behavior can be found in this code snippet (adapted
+from the mb86a20s driver's logic)::
+
+	static int foo_get_pre_ber(struct dvb_frontend *fe)
+	{
+		struct foo_state *state = fe->demodulator_priv;
+		struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+		int rc, bit_error;
+
+		/* Check if the BER measures are already available */
+		rc = foo_read_u8(state, 0x54);
+		if (rc < 0)
+			return rc;
+
+		if (!rc)
+			return 0;
+
+		/* Read Bit Error Count */
+		bit_error = foo_read_u32(state, 0x55);
+		if (bit_error < 0)
+			return bit_error;
+
+		/* Read Total Bit Count */
+		rc = foo_read_u32(state, 0x51);
+		if (rc < 0)
+			return rc;
+
+		c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->pre_bit_error.stat[0].uvalue += bit_error;
+		c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->pre_bit_count.stat[0].uvalue += rc;
+
+		return 0;
+	}
+
+If the driver doesn't provide a statistics available check bit
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+A few devices, however, may not provide a way to check if the stats are
+available (or the way to check it is unknown). They may not even provide
+a way to directly read the total number of bits or blocks.
+
+On those devices, the driver needs to ensure that it won't be reading from
+the register too often and/or needs to estimate the total number of
+bits/blocks.
+
+On such devices, a typical routine to get statistics would look like this
+(adapted from the dib8000 driver's logic)::
+
+	struct foo_state {
+		/* ... */
+
+		unsigned long per_jiffies_stats;
+	};
+
+	static int foo_get_pre_ber(struct dvb_frontend *fe)
+	{
+		struct foo_state *state = fe->demodulator_priv;
+		struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+		int rc, bit_error;
+		u64 bits;
+
+		/* Check if the time for stats has elapsed */
+		if (!time_after(jiffies, state->per_jiffies_stats))
+			return 0;
+
+		/* Next stats should be collected in 1000 ms */
+		state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+		/* Read Bit Error Count */
+		bit_error = foo_read_u32(state, 0x55);
+		if (bit_error < 0)
+			return bit_error;
+
+		/*
+		 * On this particular frontend, there's no register that
+		 * would provide the number of bits per 1000 ms sample. So,
+		 * some function would calculate it based on DTV properties
+		 */
+		bits = get_number_of_bits_per_1000ms(fe);
+
+		c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->pre_bit_error.stat[0].uvalue += bit_error;
+		c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->pre_bit_count.stat[0].uvalue += bits;
+
+		return 0;
+	}
+
+Please notice that, in both cases, we're getting the statistics using the
+:c:type:`dvb_frontend_ops` ``.read_status`` callback.
The rationale is that +the frontend core will automatically call this function periodically +(usually, 3 times per second, when the frontend is locked). + +That warrants that we won't miss to collect a counter and increment the +monotonic stats at the right time. + +Digital TV Frontend functions and types +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. kernel-doc:: drivers/media/dvb-core/dvb_frontend.h diff --git a/Documentation/media/typical_media_device.svg b/Documentation/media/typical_media_device.svg index 0c8abd69f39aff72e9ee732cb7c80410fbbe5778..d6fad90ec19987ad2be86387b72277094a677c3d 100644 --- a/Documentation/media/typical_media_device.svg +++ b/Documentation/media/typical_media_device.svg @@ -1,2948 +1,106 @@ - -image/svg+xmlAudio decoder -Video decoder -Audio encoder -Button Key/IR input logic -EEPROM -Sensor -System Bus -Demux -Conditional Access Module -Video encoder -Radio / Analog TV -Digital TV -PS.: picture is not complete: other blocks may be present -Webcam -Processing blocks -Smartcard -TunerFM/TV -Satellite Equipment Control (SEC) -Demod -I2C Bus (control bus) -Digital TV Frontend - -CPU -PCI, USB, SPI, I2C, ... -Bridge - DMA - \ No newline at end of file + +image/svg+xmlAudio decoder +Video decoder +Audio encoder +Button Key/IR input logic +EEPROM +Sensor +System Bus +Demux +Conditional Access Module +Video encoder +Radio / Analog TV +Digital TV +PS.: picture is not complete: other blocks may be present +Webcam +Processing blocks +Smartcard +TunerFM/TV +Satellite Equipment Control (SEC) +Demod +I2C Bus (control bus) +Digital TV Frontend + +CPU +PCI, USB, SPI, I2C, ... +Bridge + DMA + diff --git a/Documentation/media/uapi/dvb/dvbstb.svg b/Documentation/media/uapi/dvb/dvbstb.svg index c4140fb518afc91cd5e84245d14b8313a498231a..4effe45b448d5812e1be9235bcbd810993d4524a 100644 --- a/Documentation/media/uapi/dvb/dvbstb.svg +++ b/Documentation/media/uapi/dvb/dvbstb.svg @@ -1,651 +1,17 @@ - -image/svg+xmlAntena -Frontend -CA -Demux -SEC -Audio -Video -TV - \ No newline at end of file + +image/svg+xmlAntena +Frontend +CA +Demux +SEC +Audio +Video +TV + diff --git a/Documentation/media/uapi/v4l/bayer.svg b/Documentation/media/uapi/v4l/bayer.svg index fbd4cfb5e6bffb7a7d13c3f4fd6580e28e83b152..c395113d1876b4daefd5eb222c15286f491bcaaf 100644 --- a/Documentation/media/uapi/v4l/bayer.svg +++ b/Documentation/media/uapi/v4l/bayer.svg @@ -1,984 +1,29 @@ - -image/svg+xmlB -G -G -R -BGGR -B -G -G -R -GBRG -B -G -G -R -RGGB -B -G -G -R -GRBG - \ No newline at end of file + +image/svg+xmlB +G +G +R +BGGR +B +G +G +R +GBRG +B +G +G +R +RGGB +B +G +G +R +GRBG + diff --git a/Documentation/media/uapi/v4l/constraints.svg b/Documentation/media/uapi/v4l/constraints.svg index f710ee46b1f8b2d5dbaf5b9fe39ee711375fdc6a..7e5d7185ca49ddbd40d4666d5f1dbaa0858ad0f2 100644 --- a/Documentation/media/uapi/v4l/constraints.svg +++ b/Documentation/media/uapi/v4l/constraints.svg @@ -1,346 +1,10 @@ - -image/svg+xmlV4L2_SEL_FLAG_GE -ORIGINAL -V4L2_SEL_FLAG_LE - \ No newline at end of file + +image/svg+xmlV4L2_SEL_FLAG_GE +ORIGINAL +V4L2_SEL_FLAG_LE + diff --git a/Documentation/media/uapi/v4l/crop.svg b/Documentation/media/uapi/v4l/crop.svg index dc9a471bae6b6bca8ba17dbe2f3c7417291f1ceb..3878fe4c49e99736e703903411eb9e04c89ec87d 100644 --- a/Documentation/media/uapi/v4l/crop.svg +++ b/Documentation/media/uapi/v4l/crop.svg @@ -18,33 +18,34 @@ viewBox="0 0 739.11388 339.6584" sodipodi:docname="crop.svg">image/svg+xmlimage/svg+xmlv4l2_cropcap.bounds + transform="matrix(1,0,0,-1,204.52,9.07751)" + 
style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#d10000;fill-opacity:1;fill-rule:nonzero;stroke:none" + id="text122">v4l2_cropcap.bounds v4l2_cropcap.defrect + transform="matrix(1,0,0,-1,58.5175,166.42)" + style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#0000d1;fill-opacity:1;fill-rule:nonzero;stroke:none" + id="text128">v4l2_cropcap.defrect v4l2_crop.c + transform="matrix(1,0,0,-1,153.49,152.245)" + style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#008f00;fill-opacity:1;fill-rule:nonzero;stroke:none" + id="text134">v4l2_crop.c v4l2_format + transform="matrix(1,0,0,-1,309.415,30.34)" + style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#b000b0;fill-opacity:1;fill-rule:nonzero;stroke:none" + id="text140">v4l2_format + sodipodi:role="line" + id="tspan3398" + x="-99.291145" + y="-239.49893"> diff --git a/Documentation/media/uapi/v4l/fieldseq_bt.svg b/Documentation/media/uapi/v4l/fieldseq_bt.svg index b195301771ce0f10d4f65d55a22e851e305ce209..909d758f8543fea33e710deb77ad8c6c461a2c6e 100644 --- a/Documentation/media/uapi/v4l/fieldseq_bt.svg +++ b/Documentation/media/uapi/v4l/fieldseq_bt.svg @@ -42,15 +42,15 @@ inkscape:window-maximized="1" inkscape:current-layer="g3627" />image/svg+xmlimage/svg+xmlV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_ALTERNATE + id="tspan4843" + sodipodi:role="line" + y="-533.07098" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_ALTERNATE V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMv4l2_buffer.field: + id="tspan4851" + sodipodi:role="line" + y="-316.23969" + x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMv4l2_buffer.field: Temporal order, bottom field first transmitted (e.g. 
M/NTSC) + id="tspan4857" + sodipodi:role="line" + y="-592.59381" + x="5.8034 13.134789 19.806232 29.801399 36.472843 43.144287 47.139954 53.811398 56.475174 59.810898 66.482346 70.478004 77.149452 83.820892 87.816566 91.152283 94.488007 101.15945 107.83089 111.16662 114.50233 121.17377 131.16895 134.50468 137.84041 140.50418 147.17563 149.8394 156.51085 159.84657 163.1823 165.84607 169.84174 175.84123 179.17696 182.51268 185.8484 189.84407 196.51552 203.18695 209.18646 219.18163 221.8454 225.18112 228.51685 235.18829 241.85973 245.19545 249.19112 255.86256 259.19827 265.86972 269.20544 272.54117 282.53635 285.87207 294.53534 301.86673 309.87006 318.53336">Temporal order, bottom field first transmitted (e.g. M/NTSC) V4L2_FIELD_TOPV4L2_FIELD_TOP + id="tspan4861" + sodipodi:role="line" + y="-316.23981" + x="290.6604 296.16284 300.74957 305.3363 309.92303 314.50977 319.55023 321.8436 327.34601 331.93274 337.88889 342.47565 347.51608 353.9342 477.73062 483.23306 487.81979 492.40652 496.99326 501.57999 506.62045 508.91382 514.41626 519.00299 524.95911 529.5459 534.5863 541.00446">V4L2_FIELD_TOPV4L2_FIELD_TOP V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BTV4L2_FIELD_INTERLACED_TB (misaligned)V4L2_FIELD_SEQ_BT + id="tspan4865" + sodipodi:role="line" + y="-299.23349" + x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BTV4L2_FIELD_INTERLACED_TB (misaligned)V4L2_FIELD_SEQ_BT V4L2_FIELD_TOP + id="tspan4594" + sodipodi:role="line" + y="-533.07098" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP V4L2_FIELD_BOTTOM + id="tspan4598" + sodipodi:role="line" + y="-465.04559" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM V4L2_FIELD_ALTERNATE + id="tspan4602" + sodipodi:role="line" + y="-397.0202" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT + id="tspan5864" + sodipodi:role="line" + y="-299.23349" + x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT V4L2_FIELD_INTERLACED_TB (misaligned) + id="tspan5868" + sodipodi:role="line" + y="-192.9435" + x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 
104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned) V4L2_FIELD_SEQ_BT + id="tspan5872" + sodipodi:role="line" + y="-86.653496" + x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM + id="tspan7146" + sodipodi:role="line" + y="-316.23969" + x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM v4l2_buffer.field: + id="tspan7150" + sodipodi:role="line" + y="-328.99481" + x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field: \ No newline at end of file diff --git a/Documentation/media/uapi/v4l/fieldseq_tb.svg b/Documentation/media/uapi/v4l/fieldseq_tb.svg index 6a7b10ad4ab80b0f04db7594aae1aea95620384b..7c74344e770fa64658d20b86d3e7be42743b91ab 100644 --- a/Documentation/media/uapi/v4l/fieldseq_tb.svg +++ b/Documentation/media/uapi/v4l/fieldseq_tb.svg @@ -42,15 +42,15 @@ inkscape:window-maximized="1" inkscape:current-layer="g5551" />image/svg+xmlimage/svg+xmlV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_ALTERNATE + id="tspan6767" + sodipodi:role="line" + y="-528.771" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_ALTERNATE v4l2_buffer.field:V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM + id="tspan6775" + sodipodi:role="line" + y="-324.69479" + x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM Temporal order, top field first transmitted (e.g. 
BG/PAL)V4L2_FIELD_SEQ_TBV4L2_FIELD_INTERLACED_BT (misaligned)V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB + id="tspan6781" + sodipodi:role="line" + y="-588.2937" + x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)V4L2_FIELD_SEQ_TBV4L2_FIELD_INTERLACED_BT (misaligned)V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB V4L2_FIELD_TOP + id="tspan4585" + sodipodi:role="line" + y="-528.771" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP V4L2_FIELD_BOTTOM + id="tspan4589" + sodipodi:role="line" + y="-460.74561" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM V4L2_FIELD_ALTERNATE + id="tspan4593" + sodipodi:role="line" + y="-392.72021" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE Temporal order, top field first transmitted (e.g. BG/PAL) + id="tspan5849" + sodipodi:role="line" + y="-588.2937" + x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. 
BG/PAL) V4L2_FIELD_SEQ_TB + id="tspan5853" + sodipodi:role="line" + y="-86.604706" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB V4L2_FIELD_INTERLACED_BT (misaligned) + id="tspan5857" + sodipodi:role="line" + y="-192.89471" + x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned) V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB + id="tspan5861" + sodipodi:role="line" + y="-294.93271" + x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB v4l2_buffer.field: + id="tspan7133" + sodipodi:role="line" + y="-324.69479" + x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field: V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM + id="tspan7137" + sodipodi:role="line" + y="-311.9397" + x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 +538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM \ No newline at end of file diff --git a/Documentation/media/uapi/v4l/nv12mt.svg b/Documentation/media/uapi/v4l/nv12mt.svg index 21fcccda9723724c425ef16ce3a8f7a2fcffb469..65d05606c04cdb360aab9a29b4684e235c0c4344 100644 --- a/Documentation/media/uapi/v4l/nv12mt.svg +++ b/Documentation/media/uapi/v4l/nv12mt.svg @@ -18,8 +18,8 @@ sodipodi:docname="nv12mt.svg" style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round">image/svg+xmlimage/svg+xml0 + id="id1" + class="Slide" + clip-path="url(#presentation_clip_path)">0 6 + class="com.sun.star.drawing.CustomShape" + id="g199">6 1 + class="com.sun.star.drawing.CustomShape" + id="g221">1 7 + 
class="com.sun.star.drawing.CustomShape" + id="g236">7 2 + class="com.sun.star.drawing.CustomShape" + id="g251">2 4 + class="com.sun.star.drawing.CustomShape" + id="g266">4 3 + class="com.sun.star.drawing.CustomShape" + id="g288">3 5 + class="com.sun.star.drawing.CustomShape" + id="g303">5 + class="com.sun.star.drawing.LineShape" + id="g318"> diff --git a/Documentation/media/uapi/v4l/nv12mt_example.svg b/Documentation/media/uapi/v4l/nv12mt_example.svg index d65d989ee73b09e455dd9d1d6e7e1353283c34f0..fc51fe8fda8b677e3a74969009e31ac197851124 100644 --- a/Documentation/media/uapi/v4l/nv12mt_example.svg +++ b/Documentation/media/uapi/v4l/nv12mt_example.svg @@ -18,8 +18,8 @@ sodipodi:docname="nv12mt_example.svg" style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round">image/svg+xmlimage/svg+xml0 + class="BoundingBox" + x="3299" + y="3199" + width="2403" + height="1403" + id="rect191" + style="fill:none;stroke:none" />0 6 + class="BoundingBox" + x="8099" + y="3199" + width="2403" + height="1403" + id="rect213" + style="fill:none;stroke:none" />6 1 + class="BoundingBox" + x="5699" + y="3199" + width="2403" + height="1403" + id="rect228" + style="fill:none;stroke:none" />1 7 + class="BoundingBox" + x="10499" + y="3199" + width="2403" + height="1403" + id="rect243" + style="fill:none;stroke:none" />7 9 + class="BoundingBox" + x="15299" + y="3199" + width="2403" + height="1403" + id="rect265" + style="fill:none;stroke:none" />9 8 + class="BoundingBox" + x="12899" + y="3199" + width="2403" + height="1403" + id="rect280" + style="fill:none;stroke:none" />8 2 + class="BoundingBox" + x="3299" + y="4599" + width="2403" + height="1403" + id="rect295" + style="fill:none;stroke:none" />2 4 + class="BoundingBox" + x="8099" + y="4599" + width="2403" + height="1403" + id="rect317" + style="fill:none;stroke:none" />4 3 + class="BoundingBox" + x="5699" + y="4599" + width="2403" + height="1403" + id="rect332" + style="fill:none;stroke:none" />3 5 + class="BoundingBox" + x="10499" + y="4599" + width="2403" + height="1403" + id="rect347" + style="fill:none;stroke:none" />5 11 + class="BoundingBox" + x="15299" + y="4599" + width="2403" + height="1403" + id="rect369" + style="fill:none;stroke:none" />11 10 + class="BoundingBox" + x="12899" + y="4599" + width="2403" + height="1403" + id="rect384" + style="fill:none;stroke:none" />10 12 + class="BoundingBox" + x="3299" + y="5999" + width="2403" + height="1403" + id="rect399" + style="fill:none;stroke:none" />12 18 + class="BoundingBox" + x="8099" + y="5999" + width="2403" + height="1403" + id="rect421" + style="fill:none;stroke:none" />18 13 + class="BoundingBox" + x="5699" + y="5999" + width="2403" + height="1403" + id="rect436" + style="fill:none;stroke:none" />13 19 + class="BoundingBox" + x="10499" + y="5999" + width="2403" + height="1403" + id="rect451" + style="fill:none;stroke:none" />19 21 + class="BoundingBox" + x="15299" + y="5999" + width="2403" + height="1403" + id="rect473" + style="fill:none;stroke:none" />21 20 + class="BoundingBox" + x="12899" + y="5999" + width="2403" + height="1403" + id="rect488" + style="fill:none;stroke:none" />20 14 + class="BoundingBox" + x="3299" + y="7399" + width="2403" + height="1403" + id="rect503" + style="fill:none;stroke:none" />14 16 + class="BoundingBox" + x="8099" + y="7399" + width="2403" + height="1403" + id="rect525" + style="fill:none;stroke:none" />16 15 + class="BoundingBox" + x="5699" + y="7399" + width="2403" + height="1403" + id="rect540" + style="fill:none;stroke:none" />15 17 + 
class="BoundingBox" + x="10499" + y="7399" + width="2403" + height="1403" + id="rect555" + style="fill:none;stroke:none" />17 23 + class="BoundingBox" + x="15299" + y="7399" + width="2403" + height="1403" + id="rect577" + style="fill:none;stroke:none" />23 22 + class="BoundingBox" + x="12899" + y="7399" + width="2403" + height="1403" + id="rect592" + style="fill:none;stroke:none" />22 24 + class="BoundingBox" + x="3299" + y="8799" + width="2403" + height="1403" + id="rect607" + style="fill:none;stroke:none" />24 26 + class="BoundingBox" + x="8099" + y="8799" + width="2403" + height="1403" + id="rect629" + style="fill:none;stroke:none" />26 25 + class="BoundingBox" + x="5699" + y="8799" + width="2403" + height="1403" + id="rect644" + style="fill:none;stroke:none" />25 27 + class="BoundingBox" + x="10499" + y="8799" + width="2403" + height="1403" + id="rect659" + style="fill:none;stroke:none" />27 29 + class="BoundingBox" + x="15299" + y="8799" + width="2403" + height="1403" + id="rect681" + style="fill:none;stroke:none" />29 28 + class="BoundingBox" + x="12899" + y="8799" + width="2403" + height="1403" + id="rect696" + style="fill:none;stroke:none" />28 + class="BoundingBox" + x="14799" + y="9450" + width="1202" + height="301" + id="rect945" + style="fill:none;stroke:none" /> diff --git a/Documentation/media/uapi/v4l/selection.svg b/Documentation/media/uapi/v4l/selection.svg index d309187af967b107515b34e82a7d49a1ab5e038a..a93e3b59786db3b3677dd40e2fb446406d890a0f 100644 --- a/Documentation/media/uapi/v4l/selection.svg +++ b/Documentation/media/uapi/v4l/selection.svg @@ -1,5812 +1,1151 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
[tail of an SVG figure hunk: the +/- markup did not survive extraction; the figure's text labels are unchanged: CROP_DEFAULT, CROP_ACTIVE, CROP_BONDS, COMPOSE_DEFAULT, COMPOSE_ACTIVE, COMPOSE_PADDED, COMPOSE_BONDS, "overscan area", "DATA SOURCE", "DATA SINK"]
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
index 1903dd3846c2942a9d6083223ba297a589426e49..ee1df49f83e8078c6ce4eced10d910be86db3bec 100644
[hunk @@ -18,11 +18,11 @@ (id="metadata100"): RDF metadata markup (image/svg+xml, rdf:about="") re-indented only; the drawing is unchanged]
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
index 91cf51832c1205b9b7f93ed19c2816cc8087340c..c10d222b9ea963a82ed7451e3575b1a59e4af4f3 100644
[hunk @@ -18,11 +18,11 @@ (id="metadata260"): same RDF metadata re-indentation]
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
index cedcbf59892379f393c1f5643b5e98ac60ece79f..3cb68bf9fc047fb12ba331615bc62043336270c0 100644
[hunk @@ -18,11 +18,11 @@ (id="metadata186"): same RDF metadata re-indentation]
diff --git a/Documentation/media/uapi/v4l/vbi_525.svg b/Documentation/media/uapi/v4l/vbi_525.svg
index b05f7777ccf86124fa1e56574f7bb28a040d62a9..643aec8d0ba2cd093ebe54d59b45fcefc969fac2 100644
[hunk @@ -42,19 +42,21 @@: metadata and <tspan> text attributes reflowed; the labels (scan-line numbers, "(1)", "1st field", "2nd field") are unchanged]
diff --git a/Documentation/media/uapi/v4l/vbi_625.svg b/Documentation/media/uapi/v4l/vbi_625.svg
index c117ddb7bf7ee8798c47e0e7616aab474a047d5d..9b18243c0a066e931e5cd856a196aff406eb83c6 100644
[hunk @@ -42,19 +42,21 @@: metadata and <tspan> text attributes reflowed; the labels (scan-line numbers, "(1)", "1st field", "2nd field") are unchanged]
diff --git a/Documentation/media/uapi/v4l/vbi_hsync.svg b/Documentation/media/uapi/v4l/vbi_hsync.svg
index 4d5c0b4f146e3f533fed3fc9d0066209f2254a38..e17ff8314e7b12ce4c0cf136767384f38a7c3fb6 100644
[hunk @@ -42,27 +42,27 @@: metadata and <tspan> text attributes reflowed; the labels ("White Level", "Black Level", "Sync Level", "offset", "Line synchr. pulse", "Line blanking") are unchanged]
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index b853e48312e2bcaf41a4b182af3b28a96b69cb07..d082f9a215481eac326297cb50b8d8e5924fd717 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -147,3 +147,9 @@ appropriately. The generic error codes are described at the EINVAL The struct :c:type:`v4l2_format` ``type`` field is invalid or the requested buffer type not supported. + +EBUSY + The device is busy and cannot change the format. This could be + because the device is streaming or buffers are allocated or + queued to the driver. Relevant for :ref:`VIDIOC_S_FMT + ` only.
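The EBUSY case added above is the part most visible to applications: once buffers have been allocated or queued, or streaming has started, a format change is refused. A minimal userspace sketch of handling it, assuming an already-open capture fd and arbitrary format values that are not taken from the patch::

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Request a new capture format. If the device is streaming or still
     * owns buffers, VIDIOC_S_FMT may now fail with EBUSY rather than
     * changing the format underneath the queued buffers. */
    static int set_capture_format(int fd, unsigned int width, unsigned int height)
    {
            struct v4l2_format fmt;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            fmt.fmt.pix.width = width;
            fmt.fmt.pix.height = height;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
            fmt.fmt.pix.field = V4L2_FIELD_NONE;

            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
                    int err = errno;

                    if (err == EBUSY)
                            fprintf(stderr, "stop streaming and free buffers before S_FMT\n");
                    return -err;
            }
            return 0;
    }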
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index e0ee0f1aeb05b499ee3254878698bf2b86ac107f..3c4f58bda178448c03702b9a4ed9b5a9faca0db8 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -607,8 +607,9 @@ References Authors ------- -Steve Longerbeam -Philipp Zabel -Russell King + +- Steve Longerbeam +- Philipp Zabel +- Russell King Copyright (C) 2012-2017 Mentor Graphics Inc.
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index 2e24d680605229d86f81b8089af35f72c0499dfa..10f2ce42ece2bfc232e7c7dea91b23ff16a4d5e5 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -41,6 +41,7 @@ For more details see the file COPYING in the source distribution of Linux.
cx88 davinci-vpbe fimc + imx ivtv max2175 meye diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 0fde3dcf077a302eb24ea6d8fd8df65c4b252d8e..625549d4c74a09642d72bce75387c15348406223 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -435,7 +435,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: PM status to 'suspended' and update its parent's counter of 'active' children as appropriate (it is only valid to use this function if 'power.runtime_error' is set or 'power.disable_depth' is greater than - zero) + zero); it will fail and return an error code if the device has a child + which is active and the 'power.ignore_children' flag is unset bool pm_runtime_active(struct device *dev); - return true if the device's runtime PM status is 'active' or its diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt index 65ea5915178b4b9842474b7908d06660444aae34..074670b98bac784c27eecc18f672b2f1ad24f489 100644 --- a/Documentation/printk-formats.txt +++ b/Documentation/printk-formats.txt @@ -58,20 +58,23 @@ Symbols/Function Pointers %ps versatile_init %pB prev_fn_of_versatile_init+0x88/0x88 -For printing symbols and function pointers. The ``S`` and ``s`` specifiers -result in the symbol name with (``S``) or without (``s``) offsets. Where -this is used on a kernel without KALLSYMS - the symbol address is -printed instead. +The ``F`` and ``f`` specifiers are for printing function pointers, +for example, f->func, &gettimeofday. They have the same result as +``S`` and ``s`` specifiers. But they do an extra conversion on +ia64, ppc64 and parisc64 architectures where the function pointers +are actually function descriptors. + +The ``S`` and ``s`` specifiers can be used for printing symbols +from direct addresses, for example, __builtin_return_address(0), +(void *)regs->ip. They result in the symbol name with (``S``) or +without (``s``) offsets. If KALLSYMS are disabled then the symbol +address is printed instead. The ``B`` specifier results in the symbol name with offsets and should be used when printing stack backtraces. The specifier takes into consideration the effect of compiler optimisations which may occur when tail-call``s are used and marked with the noreturn GCC attribute. -On ia64, ppc64 and parisc64 architectures function pointers are -actually function descriptors which must first be resolved. The ``F`` and -``f`` specifiers perform this resolution and then provide the same -functionality as the ``S`` and ``s`` specifiers. 
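A short fragment makes the difference between these specifiers concrete. This is only a sketch, assuming built-in kernel code; the helper name and the some_ops pointer mentioned in the comment are illustrative, and the printed names are simply whatever the addresses resolve to::

    #include <linux/kernel.h>
    #include <linux/printk.h>

    /* %pS resolves a raw text address to symbol+offset, %ps drops the
     * offset, and %pB is the variant intended for stack-trace entries
     * (it compensates for tail-call optimisation). */
    static void show_caller(void)
    {
            void *addr = __builtin_return_address(0);

            pr_info("called from %pS\n", addr);
            pr_info("called from %ps\n", addr);
            pr_info("backtrace entry %pB\n", addr);
    }

    /*
     * For function pointers, which are function descriptors on ia64,
     * ppc64 and parisc64, use %pF/%pf instead, e.g.:
     *
     *      pr_info("handler: %pF\n", some_ops->func);
     */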
Kernel Pointers =============== diff --git a/MAINTAINERS b/MAINTAINERS index f66488dfdbc9cc59fc744ce9f8349b9b69bd945c..1c3feffb1c1cfd2b46685907300a9f026fb67e6a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1161,7 +1161,7 @@ M: Brendan Higgins R: Benjamin Herrenschmidt R: Joel Stanley L: linux-i2c@vger.kernel.org -L: openbmc@lists.ozlabs.org +L: openbmc@lists.ozlabs.org (moderated for non-subscribers) S: Maintained F: drivers/irqchip/irq-aspeed-i2c-ic.c F: drivers/i2c/busses/i2c-aspeed.c @@ -5090,12 +5090,20 @@ M: Andrew Lunn M: Florian Fainelli L: netdev@vger.kernel.org S: Maintained -F: include/linux/phy.h -F: include/linux/phy_fixed.h -F: drivers/net/phy/ +F: Documentation/ABI/testing/sysfs-bus-mdio +F: Documentation/devicetree/bindings/net/mdio* F: Documentation/networking/phy.txt +F: drivers/net/phy/ F: drivers/of/of_mdio.c F: drivers/of/of_net.c +F: include/linux/*mdio*.h +F: include/linux/of_net.h +F: include/linux/phy.h +F: include/linux/phy_fixed.h +F: include/linux/platform_data/mdio-gpio.h +F: include/trace/events/mdio.h +F: include/uapi/linux/mdio.h +F: include/uapi/linux/mii.h EXT2 FILE SYSTEM M: Jan Kara @@ -5826,7 +5834,7 @@ F: drivers/staging/greybus/spi.c F: drivers/staging/greybus/spilib.c F: drivers/staging/greybus/spilib.h -GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS +GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS M: Bryan O'Donoghue S: Maintained F: drivers/staging/greybus/loopback.c @@ -7102,7 +7110,6 @@ M: Marc Zyngier L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core -T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core F: Documentation/devicetree/bindings/interrupt-controller/ F: drivers/irqchip/ @@ -10375,7 +10382,7 @@ L: linux-gpio@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git S: Maintained F: Documentation/devicetree/bindings/pinctrl/ -F: Documentation/pinctrl.txt +F: Documentation/driver-api/pinctl.rst F: drivers/pinctrl/ F: include/linux/pinctrl/ @@ -13996,6 +14003,7 @@ F: drivers/block/virtio_blk.c F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h F: drivers/crypto/virtio/ +F: mm/balloon_compaction.c VIRTIO CRYPTO DRIVER M: Gonglei @@ -14210,6 +14218,12 @@ F: drivers/watchdog/ F: include/linux/watchdog.h F: include/uapi/linux/watchdog.h +WHISKEYCOVE PMIC GPIO DRIVER +M: Kuppuswamy Sathyanarayanan +L: linux-gpio@vger.kernel.org +S: Maintained +F: drivers/gpio/gpio-wcove.c + WIIMOTE HID DRIVER M: David Herrmann L: linux-input@vger.kernel.org diff --git a/Makefile b/Makefile index 0662b5201d3eda544b0d2493fe65444905d51ce4..235826f957411e8a3f02225439c38d92adf00802 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc6 NAME = Fearless Coyote # *DOCUMENTATION* diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 2a07e6ecafbd768bcdca7c09fa4cc29bc1a6ff65..71d3efff99d35c56c6f30bea8a6ab1cf1702d753 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a208bfe367b55574e2ca4a901de8b3a2862aee2e..61a0cb15067ea653eaa9631fe64276385e3de064 100644 --- a/arch/arm/Kconfig +++ 
b/arch/arm/Kconfig @@ -380,7 +380,7 @@ config ARCH_EP93XX bool "EP93xx-based" select ARCH_HAS_HOLES_MEMORYMODEL select ARM_AMBA - select ARM_PATCH_PHYS_VIRT + imply ARM_PATCH_PHYS_VIRT select ARM_VIC select AUTO_ZRELADDR select CLKDEV_LOOKUP diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts index 895fa6cfa15a9ee1c56c5827f5f5fe2f8e512196..563901e0ec071f0c66a4590b17fe5c3f6cfcbd69 100644 --- a/arch/arm/boot/dts/armada-388-gp.dts +++ b/arch/arm/boot/dts/armada-388-gp.dts @@ -75,7 +75,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pca0_pins>; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -87,7 +87,7 @@ compatible = "nxp,pca9555"; pinctrl-names = "default"; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index a423e8ebfb3758057d41a4e9f77505df5dc1d783..67e72bc72e805be995b0d44fa68f74e29161b140 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -301,25 +301,4 @@ pinctrl-names = "default"; pinctrl-0 = <&vpif_capture_pins>, <&vpif_display_pins>; status = "okay"; - - /* VPIF capture port */ - port@0 { - vpif_input_ch0: endpoint@0 { - reg = <0>; - bus-width = <8>; - }; - - vpif_input_ch1: endpoint@1 { - reg = <1>; - bus-width = <8>; - data-shift = <8>; - }; - }; - - /* VPIF display port */ - port@1 { - vpif_output_ch0: endpoint { - bus-width = <8>; - }; - }; }; diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts index b837fec70eec8cb7d241d14559c8fad9c0cbb909..a0f0916156e66d83949716c4b571184695fbef3b 100644 --- a/arch/arm/boot/dts/da850-lcdk.dts +++ b/arch/arm/boot/dts/da850-lcdk.dts @@ -318,11 +318,4 @@ pinctrl-names = "default"; pinctrl-0 = <&vpif_capture_pins>; status = "okay"; - - /* VPIF capture port */ - port { - vpif_ch0: endpoint { - bus-width = <8>; - }; - }; }; diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index 1865976db5f9a3e11aad03cdd951d2f3af83c78d..c72a2132aa823b053c5ab9450a92faf266c85878 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts @@ -68,6 +68,34 @@ DM816X_IOPAD(0x0d08, MUX_MODE0) /* USB1_DRVVBUS */ >; }; + + nandflash_pins: nandflash_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0b38, PULL_UP | MUX_MODE0) /* PINCTRL207 GPMC_CS0*/ + DM816X_IOPAD(0x0b60, PULL_ENA | MUX_MODE0) /* PINCTRL217 GPMC_ADV_ALE */ + DM816X_IOPAD(0x0b54, PULL_UP | PULL_ENA | MUX_MODE0) /* PINCTRL214 GPMC_OE_RE */ + DM816X_IOPAD(0x0b58, PULL_ENA | MUX_MODE0) /* PINCTRL215 GPMC_BE0_CLE */ + DM816X_IOPAD(0x0b50, PULL_UP | MUX_MODE0) /* PINCTRL213 GPMC_WE */ + DM816X_IOPAD(0x0b6c, MUX_MODE0) /* PINCTRL220 GPMC_WAIT */ + DM816X_IOPAD(0x0be4, PULL_ENA | MUX_MODE0) /* PINCTRL250 GPMC_CLK */ + DM816X_IOPAD(0x0ba4, MUX_MODE0) /* PINCTRL234 GPMC_D0 */ + DM816X_IOPAD(0x0ba8, MUX_MODE0) /* PINCTRL234 GPMC_D1 */ + DM816X_IOPAD(0x0bac, MUX_MODE0) /* PINCTRL234 GPMC_D2 */ + DM816X_IOPAD(0x0bb0, MUX_MODE0) /* PINCTRL234 GPMC_D3 */ + DM816X_IOPAD(0x0bb4, MUX_MODE0) /* PINCTRL234 GPMC_D4 */ + DM816X_IOPAD(0x0bb8, MUX_MODE0) /* PINCTRL234 GPMC_D5 */ + DM816X_IOPAD(0x0bbc, MUX_MODE0) /* PINCTRL234 GPMC_D6 */ + DM816X_IOPAD(0x0bc0, MUX_MODE0) /* PINCTRL234 GPMC_D7 */ + DM816X_IOPAD(0x0bc4, MUX_MODE0) /* PINCTRL234 GPMC_D8 */ + 
DM816X_IOPAD(0x0bc8, MUX_MODE0) /* PINCTRL234 GPMC_D9 */ + DM816X_IOPAD(0x0bcc, MUX_MODE0) /* PINCTRL234 GPMC_D10 */ + DM816X_IOPAD(0x0bd0, MUX_MODE0) /* PINCTRL234 GPMC_D11 */ + DM816X_IOPAD(0x0bd4, MUX_MODE0) /* PINCTRL234 GPMC_D12 */ + DM816X_IOPAD(0x0bd8, MUX_MODE0) /* PINCTRL234 GPMC_D13 */ + DM816X_IOPAD(0x0bdc, MUX_MODE0) /* PINCTRL234 GPMC_D14 */ + DM816X_IOPAD(0x0be0, MUX_MODE0) /* PINCTRL234 GPMC_D15 */ + >; + }; }; &i2c1 { @@ -90,6 +118,8 @@ &gpmc { ranges = <0 0 0x04000000 0x01000000>; /* CS0: 16MB for NAND */ + pinctrl-names = "default"; + pinctrl-0 = <&nandflash_pins>; nand@0,0 { compatible = "ti,omap2-nand"; @@ -98,9 +128,11 @@ interrupt-parent = <&gpmc>; interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ <1 IRQ_TYPE_NONE>; /* termcount */ + rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ #address-cells = <1>; #size-cells = <1>; ti,nand-ecc-opt = "bch8"; + ti,elm-id = <&elm>; nand-bus-width = <16>; gpmc,device-width = <2>; gpmc,sync-clk-ps = <0>; @@ -164,7 +196,7 @@ vmmc-supply = <&vmmcsd_fixed>; bus-width = <4>; cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>; }; /* At least dm8168-evm rev c won't support multipoint, later may */ diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index 59cbf958fcc3178c4b56aad647c340e40a63c098..566b2a8c8b96853da331571c233dc786d8adc187 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -145,7 +145,7 @@ }; elm: elm@48080000 { - compatible = "ti,816-elm"; + compatible = "ti,am3352-elm"; ti,hwmods = "elm"; reg = <0x48080000 0x2000>; interrupts = <4>; diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts index 4d57a55473afd1f563667d948b49269e11028697..a6298eb56978710c24291fc05d17770fefccd188 100644 --- a/arch/arm/boot/dts/dra71-evm.dts +++ b/arch/arm/boot/dts/dra71-evm.dts @@ -190,7 +190,7 @@ ti,rx-internal-delay = ; ti,tx-internal-delay = ; ti,fifo-depth = ; - ti,impedance-control = <0x1f>; + ti,min-output-impedance; }; dp83867_1: ethernet-phy@3 { @@ -198,7 +198,7 @@ ti,rx-internal-delay = ; ti,tx-internal-delay = ; ti,fifo-depth = ; - ti,impedance-control = <0x1f>; + ti,min-output-impedance; }; }; diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index 497a9470c8881bca6e49800f90c0f5d20dd3d84a..5739389f5bb877ef7b29455a1bcd47328d7223ed 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -59,6 +59,9 @@ compatible = "samsung,exynos4210-audss-clock"; reg = <0x03810000 0x0C>; #clock-cells = <1>; + clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>, + <&clock CLK_SCLK_AUDIO0>, <&clock CLK_SCLK_AUDIO0>; + clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in"; }; i2s0: i2s@03830000 { diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index dfcc8e00cf1c53a753dc753c2c5598b220b480a1..0ade3619f3c3f1332b8d89c8d06ed4a67f29025b 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -297,6 +297,7 @@ #address-cells = <1>; #size-cells = <1>; status = "disabled"; + ranges; adc: adc@50030800 { compatible = "fsl,imx25-gcq"; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi index aeaa5a6e4fcf462bc8a381ab20ba85c3579dea5c..a24e4f1911abe77917726d8ff8342f7dcb88dab2 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi @@ -507,7 +507,7 @@ pinctrl_pcie: pciegrp { fsl,pins = < /* PCIe reset 
*/ - MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0 + MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0 MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0 >; }; @@ -668,7 +668,7 @@ &pcie { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcie>; - reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>; + reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 54c45402286b10dc06240aef9ec7d015a3fd5a58..0a24d1bf3c393463148919e685a879df39af58ef 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -557,6 +557,14 @@ >; }; + pinctrl_spi4: spi4grp { + fsl,pins = < + MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59 + MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59 + MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59 + >; + }; + pinctrl_tsc2046_pendown: tsc2046_pendown { fsl,pins = < MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59 @@ -697,13 +705,5 @@ fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x110b0 >; - - pinctrl_spi4: spi4grp { - fsl,pins = < - MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59 - MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59 - MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59 - >; - }; }; }; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 2484f11761ea24f3bfa44a81e7309442399367d9..858e1fed762a1df80f33bac4576b5d900e021bcf 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -1126,8 +1126,8 @@ }; }; - gpu: mali@ffa30000 { - compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard"; + gpu: gpu@ffa30000 { + compatible = "rockchip,rk3288-mali", "arm,mali-t760"; reg = <0xffa30000 0x10000>; interrupts = , , diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index cc06da3943668415e9b6379dbe38d2fde36518ec..60e69aeacbdbf4dff78923f227f96f8bc49c1fbc 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -303,7 +303,7 @@ #size-cells = <1>; atmel,smc = <&hsmc>; reg = <0x10000000 0x10000000 - 0x40000000 0x30000000>; + 0x60000000 0x30000000>; ranges = <0x0 0x0 0x10000000 0x10000000 0x1 0x0 0x60000000 0x10000000 0x2 0x0 0x70000000 0x10000000 @@ -1048,18 +1048,18 @@ }; hsmc: hsmc@f8014000 { - compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd"; + compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd"; reg = <0xf8014000 0x1000>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>; + interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>; clocks = <&hsmc_clk>; #address-cells = <1>; #size-cells = <1>; ranges; - pmecc: ecc-engine@ffffc070 { + pmecc: ecc-engine@f8014070 { compatible = "atmel,sama5d2-pmecc"; - reg = <0xffffc070 0x490>, - <0xffffc500 0x100>; + reg = <0xf8014070 0x490>, + <0xf8014500 0x100>; }; }; diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi index 8923ba625b76f156f27fde9c66f1f72ad044405a..19a8f4fcfab50ef5300360d980f1738d99345a94 100644 --- a/arch/arm/boot/dts/sun8i-a83t.dtsi +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi @@ -44,7 +44,9 @@ #include +#include #include +#include / { interrupt-parent = <&gic>; @@ -175,8 +177,8 @@ compatible = "allwinner,sun8i-a83t-dma"; reg = <0x01c02000 0x1000>; interrupts = ; - clocks = <&ccu 21>; - resets = <&ccu 7>; + clocks = <&ccu CLK_BUS_DMA>; + resets = <&ccu RST_BUS_DMA>; #dma-cells = <1>; }; @@ -195,7 +197,7 @@ , ; reg = <0x01c20800 0x400>; - clocks = <&ccu 45>, <&osc24M>, <&osc16Md512>; + clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc16Md512>; clock-names = "apb", "hosc", "losc"; gpio-controller; interrupt-controller; @@ -247,8 +249,8 @@ "allwinner,sun8i-h3-spdif"; reg = <0x01c21000 0x400>; interrupts = ; - 
clocks = <&ccu 44>, <&ccu 76>; - resets = <&ccu 32>; + clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>; + resets = <&ccu RST_BUS_SPDIF>; clock-names = "apb", "spdif"; dmas = <&dma 2>; dma-names = "tx"; @@ -263,8 +265,8 @@ interrupts = ; reg-shift = <2>; reg-io-width = <4>; - clocks = <&ccu 53>; - resets = <&ccu 40>; + clocks = <&ccu CLK_BUS_UART0>; + resets = <&ccu RST_BUS_UART0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index 6f2162608006770896df2adcc258a4ed2a3b63a9..d38282b9e5d442cbf1709e38f8a64ee9bfacce1c 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -394,7 +394,7 @@ emac: ethernet@1c30000 { compatible = "allwinner,sun8i-h3-emac"; syscon = <&syscon>; - reg = <0x01c30000 0x104>; + reg = <0x01c30000 0x10000>; interrupts = ; interrupt-names = "macirq"; resets = <&ccu RST_BUS_EMAC>; diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts index 86d8df98802fdf66a651adb04de5c4be49af3fcc..13bcc460bcb2adf2ab17775bc0aa2c01bc694aed 100644 --- a/arch/arm/boot/dts/tango4-vantage-1172.dts +++ b/arch/arm/boot/dts/tango4-vantage-1172.dts @@ -22,7 +22,7 @@ }; ð0 { - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; phy-handle = <ð0_phy>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index 1869af6bac5ceee7178b7e0126ecc88816d4d0c5..25021b798a1e4d857567df78349e5d7c781aa5a3 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -19,6 +19,11 @@ #ifndef __ASSEMBLY__ +#define ARCH_HAS_KIMAGE_ARCH +struct kimage_arch { + u32 kernel_r2; +}; + /** * crash_setup_regs() - save registers for the panic kernel * @newregs: registers are saved here diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 3f2eb76243e3c5f9d387959acae740ce871e5afa..d5562f9ce60079139d360e5d6afac59469051454 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end) { tlb->mm = mm; tlb->fullmm = !(start | (end+1)); @@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start } static inline void -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force) { + if (force) { + tlb->range_start = start; + tlb->range_end = end; + } + tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h index 14749aec94bf3847148d3e6532f6507d06a2e15a..921d8274855c74cc5f1ba170db869dbe056d03b4 100644 --- a/arch/arm/include/asm/ucontext.h +++ b/arch/arm/include/asm/ucontext.h @@ -35,6 +35,12 @@ struct ucontext { * bytes, to prevent unpredictable padding in the signal frame. */ +/* + * Dummy padding block: if this magic is encountered, the block should + * be skipped using the corresponding size field. 
+ */ +#define DUMMY_MAGIC 0xb0d9ed01 + #ifdef CONFIG_CRUNCH #define CRUNCH_MAGIC 0x5065cf03 #define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8) diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 15495887ca14eedc883daa60b9a9a034913f0c3e..fe1419eeb9321ea553fcd68462596dadaf385185 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -30,7 +30,6 @@ extern unsigned long kexec_boot_atags; static atomic_t waiting_for_crash_ipi; -static unsigned long dt_mem; /* * Provide a dummy crash_notes definition while crash dump arrives to arm. * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. @@ -42,6 +41,9 @@ int machine_kexec_prepare(struct kimage *image) __be32 header; int i, err; + image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET + + KEXEC_ARM_ATAGS_OFFSET; + /* * Validate that if the current HW supports SMP, then the SW supports * and implements CPU hotplug for the current HW. If not, we won't be @@ -66,8 +68,8 @@ int machine_kexec_prepare(struct kimage *image) if (err) return err; - if (be32_to_cpu(header) == OF_DT_HEADER) - dt_mem = current_segment->mem; + if (header == cpu_to_be32(OF_DT_HEADER)) + image->arch.kernel_r2 = current_segment->mem; } return 0; } @@ -165,8 +167,7 @@ void machine_kexec(struct kimage *image) kexec_start_address = image->start; kexec_indirection_page = page_list; kexec_mach_type = machine_arch_type; - kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET - + KEXEC_ARM_ATAGS_OFFSET; + kexec_boot_atags = image->arch.kernel_r2; /* copy our kernel relocation code to the control code page */ reboot_entry = fncpy(reboot_code_buffer, diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 4e80bf7420d4e65fb30e0c68e7bef53932f765b3..8e9a3e40d949567598cfdcc8146823f5ada96cb2 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -987,6 +987,9 @@ static void __init reserve_crashkernel(void) if (crash_base <= 0) { unsigned long long crash_max = idmap_to_phys((u32)~0); + unsigned long long lowmem_max = __pa(high_memory - 1) + 1; + if (crash_max > lowmem_max) + crash_max = lowmem_max; crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, crash_size, CRASH_ALIGN); if (!crash_base) { diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 7b8f2141427bda172899bfe8ae5113367163af47..5814298ef0b701e61e4de018aa216009c9445d30 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -40,8 +40,10 @@ static int preserve_crunch_context(struct crunch_sigframe __user *frame) return __copy_to_user(frame, kframe, sizeof(*frame)); } -static int restore_crunch_context(struct crunch_sigframe __user *frame) +static int restore_crunch_context(char __user **auxp) { + struct crunch_sigframe __user *frame = + (struct crunch_sigframe __user *)*auxp; char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; @@ -52,6 +54,7 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame) if (kframe->magic != CRUNCH_MAGIC || kframe->size != CRUNCH_STORAGE_SIZE) return -1; + *auxp += CRUNCH_STORAGE_SIZE; crunch_task_restore(current_thread_info(), &kframe->storage); return 0; } @@ -59,21 +62,39 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame) #ifdef CONFIG_IWMMXT -static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame) +static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; + int err = 0; /* the iWMMXt context must be 64 
bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); - kframe->magic = IWMMXT_MAGIC; - kframe->size = IWMMXT_STORAGE_SIZE; - iwmmxt_task_copy(current_thread_info(), &kframe->storage); - return __copy_to_user(frame, kframe, sizeof(*frame)); + + if (test_thread_flag(TIF_USING_IWMMXT)) { + kframe->magic = IWMMXT_MAGIC; + kframe->size = IWMMXT_STORAGE_SIZE; + iwmmxt_task_copy(current_thread_info(), &kframe->storage); + + err = __copy_to_user(frame, kframe, sizeof(*frame)); + } else { + /* + * For bug-compatibility with older kernels, some space + * has to be reserved for iWMMXt even if it's not used. + * Set the magic and size appropriately so that properly + * written userspace can skip it reliably: + */ + __put_user_error(DUMMY_MAGIC, &frame->magic, err); + __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err); + } + + return err; } -static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) +static int restore_iwmmxt_context(char __user **auxp) { + struct iwmmxt_sigframe __user *frame = + (struct iwmmxt_sigframe __user *)*auxp; char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; @@ -81,10 +102,28 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (__copy_from_user(kframe, frame, sizeof(*frame))) return -1; - if (kframe->magic != IWMMXT_MAGIC || - kframe->size != IWMMXT_STORAGE_SIZE) + + /* + * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy + * block is discarded for compatibility with setup_sigframe() if + * present, but we don't mandate its presence. If some other + * magic is here, it's not for us: + */ + if (!test_thread_flag(TIF_USING_IWMMXT) && + kframe->magic != DUMMY_MAGIC) + return 0; + + if (kframe->size != IWMMXT_STORAGE_SIZE) return -1; - iwmmxt_task_restore(current_thread_info(), &kframe->storage); + + if (test_thread_flag(TIF_USING_IWMMXT)) { + if (kframe->magic != IWMMXT_MAGIC) + return -1; + + iwmmxt_task_restore(current_thread_info(), &kframe->storage); + } + + *auxp += IWMMXT_STORAGE_SIZE; return 0; } @@ -107,8 +146,10 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame) return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); } -static int restore_vfp_context(struct vfp_sigframe __user *frame) +static int restore_vfp_context(char __user **auxp) { + struct vfp_sigframe __user *frame = + (struct vfp_sigframe __user *)*auxp; unsigned long magic; unsigned long size; int err = 0; @@ -121,6 +162,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; + *auxp += size; return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc); } @@ -141,7 +183,7 @@ struct rt_sigframe { static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) { - struct aux_sigframe __user *aux; + char __user *aux; sigset_t set; int err; @@ -169,18 +211,18 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= !valid_user_regs(regs); - aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; + aux = (char __user *) sf->uc.uc_regspace; #ifdef CONFIG_CRUNCH if (err == 0) - err |= restore_crunch_context(&aux->crunch); + err |= restore_crunch_context(&aux); #endif #ifdef CONFIG_IWMMXT - if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) - err |= restore_iwmmxt_context(&aux->iwmmxt); + if (err == 0) + err |= restore_iwmmxt_context(&aux); #endif #ifdef CONFIG_VFP if (err == 0) - err |= 
restore_vfp_context(&aux->vfp); + err |= restore_vfp_context(&aux); #endif return err; @@ -286,7 +328,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= preserve_crunch_context(&aux->crunch); #endif #ifdef CONFIG_IWMMXT - if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) + if (err == 0) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index b5625d0092881bbbbb1022de2664c51f2a304844..e568c8c6f69cb67bf423db51f25d6315a15fbc96 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -1166,7 +1166,7 @@ static struct tvp514x_platform_data tvp5146_pdata = { #define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) -static const struct vpif_input da850_ch0_inputs[] = { +static struct vpif_input da850_ch0_inputs[] = { { .input = { .index = 0, @@ -1181,7 +1181,7 @@ static const struct vpif_input da850_ch0_inputs[] = { }, }; -static const struct vpif_input da850_ch1_inputs[] = { +static struct vpif_input da850_ch1_inputs[] = { { .input = { .index = 0, diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c index f5dce9b4e617df833cd210bac2fe65ac4a93b788..f77a4f7660505fe949af782e50b99c08b12a5570 100644 --- a/arch/arm/mach-davinci/clock.c +++ b/arch/arm/mach-davinci/clock.c @@ -218,6 +218,15 @@ int clk_set_parent(struct clk *clk, struct clk *parent) } EXPORT_SYMBOL(clk_set_parent); +struct clk *clk_get_parent(struct clk *clk) +{ + if (!clk) + return NULL; + + return clk->parent; +} +EXPORT_SYMBOL(clk_get_parent); + int clk_register(struct clk *clk) { if (clk == NULL || IS_ERR(clk)) diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 39ef3b613912f8888013c30d591e4ad4f0cd8095..beec5f16443a29fb5a62f6deb8969ea33cff5b6d 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -475,6 +475,26 @@ int clk_set_rate(struct clk *clk, unsigned long rate) } EXPORT_SYMBOL(clk_set_rate); +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + WARN_ON(clk); + return 0; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + WARN_ON(clk); + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + return clk->parent; +} +EXPORT_SYMBOL(clk_get_parent); + static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 }; static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 }; diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h index 7a0c13bf42694b724e0fcb35439070eefde0a646..844e8ac593e270125980df3ecced92b6d6322f5e 100644 --- a/arch/arm/mach-ixp4xx/include/mach/io.h +++ b/arch/arm/mach-ixp4xx/include/mach/io.h @@ -95,8 +95,10 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p) } static inline void __indirect_writesb(volatile void __iomem *bus_addr, - const u8 *vaddr, int count) + const void *p, int count) { + const u8 *vaddr = p; + while (count--) writeb(*vaddr++, bus_addr); } @@ -118,8 +120,10 @@ static inline void __indirect_writew(u16 value, volatile void __iomem *p) } static inline void __indirect_writesw(volatile void __iomem *bus_addr, - const u16 *vaddr, int count) + const void *p, int count) { + const u16 *vaddr = p; + while (count--) writew(*vaddr++, bus_addr); } @@ -137,8 +141,9 @@ static inline void __indirect_writel(u32 value, volatile void __iomem *p) } static inline void __indirect_writesl(volatile 
void __iomem *bus_addr, - const u32 *vaddr, int count) + const void *p, int count) { + const u32 *vaddr = p; while (count--) writel(*vaddr++, bus_addr); } @@ -160,8 +165,10 @@ static inline u8 __indirect_readb(const volatile void __iomem *p) } static inline void __indirect_readsb(const volatile void __iomem *bus_addr, - u8 *vaddr, u32 count) + void *p, u32 count) { + u8 *vaddr = p; + while (count--) *vaddr++ = readb(bus_addr); } @@ -183,8 +190,10 @@ static inline u16 __indirect_readw(const volatile void __iomem *p) } static inline void __indirect_readsw(const volatile void __iomem *bus_addr, - u16 *vaddr, u32 count) + void *p, u32 count) { + u16 *vaddr = p; + while (count--) *vaddr++ = readw(bus_addr); } @@ -204,8 +213,10 @@ static inline u32 __indirect_readl(const volatile void __iomem *p) } static inline void __indirect_readsl(const volatile void __iomem *bus_addr, - u32 *vaddr, u32 count) + void *p, u32 count) { + u32 *vaddr = p; + while (count--) *vaddr++ = readl(bus_addr); } @@ -523,8 +534,15 @@ static inline void iowrite32_rep(void __iomem *addr, const void *vaddr, #endif } -#define ioport_map(port, nr) ((void __iomem*)(port + PIO_OFFSET)) -#define ioport_unmap(addr) +#define ioport_map(port, nr) ioport_map(port, nr) +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return ((void __iomem*)((port) + PIO_OFFSET)); +} +#define ioport_unmap(addr) ioport_unmap(addr) +static inline void ioport_unmap(void __iomem *addr) +{ +} #endif /* CONFIG_PCI */ #endif /* __ASM_ARM_ARCH_IO_H */ diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c index 3330ac7cfbefc78388b78ad06a47ab102a9d9a79..671c7a09ab3d65a43b1effae5fd3bffbcb9c22cb 100644 --- a/arch/arm/mach-mmp/devices.c +++ b/arch/arm/mach-mmp/devices.c @@ -238,7 +238,7 @@ void pxa_usb_phy_deinit(void __iomem *phy_reg) #endif #if IS_ENABLED(CONFIG_USB_SUPPORT) -static u64 usb_dma_mask = ~(u32)0; +static u64 __maybe_unused usb_dma_mask = ~(u32)0; #if IS_ENABLED(CONFIG_USB_MV_UDC) struct resource pxa168_u2o_resources[] = { diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c index e62273aacb43681f2bd8ab0c7e95d82d2c32020f..4ffbbd217e8286e18181fac98487a8860ead6372 100644 --- a/arch/arm/mach-mvebu/platsmp.c +++ b/arch/arm/mach-mvebu/platsmp.c @@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr) return PTR_ERR(base); writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG); - writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG); + writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG); iounmap(base); diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 6613a6ff5dbc9e35512ac94fd84904ddf2a63ee1..6cbc69c92913dbc4074a65e269bb5243e9193cdb 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -510,6 +510,7 @@ static void __init ams_delta_init(void) static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) { struct modem_private_data *priv = port->private_data; + int ret; if (IS_ERR(priv->regulator)) return; @@ -518,9 +519,16 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) return; if (state == 0) - regulator_enable(priv->regulator); + ret = regulator_enable(priv->regulator); else if (old == 0) - regulator_disable(priv->regulator); + ret = regulator_disable(priv->regulator); + else + ret = 0; + + if (ret) + dev_warn(port->dev, + "ams_delta modem_pm: failed to %sable regulator: %d\n", + 
state ? "dis" : "en", ret); } static struct plat_serial8250_port ams_delta_modem_ports[] = { diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index 4dfb995048103b8bff965b6119f3665e54887a40..95ac1929aede4d3f82cabcf627ccf548d369b6c6 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -441,13 +441,11 @@ static struct spi_board_info __initdata mistral_boardinfo[] = { { .chip_select = 0, } }; -#ifdef CONFIG_PM static irqreturn_t osk_mistral_wake_interrupt(int irq, void *ignored) { return IRQ_HANDLED; } -#endif static void __init osk_mistral_init(void) { @@ -515,7 +513,6 @@ static void __init osk_mistral_init(void) gpio_direction_input(OMAP_MPUIO(2)); irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); -#ifdef CONFIG_PM /* share the IRQ in case someone wants to use the * button for more than wakeup from system sleep. */ @@ -529,7 +526,6 @@ static void __init osk_mistral_init(void) ret); } else enable_irq_wake(irq); -#endif } else printk(KERN_ERR "OSK+Mistral: wakeup button is awol\n"); diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index dc9e34e670a26f280bdfe7947fa8709ee750465f..b1e661bb5521e4281a487c25cbfea12c0b29f91f 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -28,7 +28,7 @@ static const struct of_device_id omap_dt_match_table[] __initconst = { { } }; -static void __init omap_generic_init(void) +static void __init __maybe_unused omap_generic_init(void) { pdata_quirks_init(omap_dt_match_table); diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index be517b048762b4fe1bb5467ebaea61a19cc1386e..5b614388d72f4b7ca8be1c851f1369f719ce2c29 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -32,120 +32,6 @@ static u16 control_devconf1_offset; #define HSMMC_NAME_LEN 9 -static void omap_hsmmc1_before_set_reg(struct device *dev, - int power_on, int vdd) -{ - u32 reg, prog_io; - struct omap_hsmmc_platform_data *mmc = dev->platform_data; - - if (mmc->remux) - mmc->remux(dev, power_on); - - /* - * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the - * card with Vcc regulator (from twl4030 or whatever). OMAP has both - * 1.8V and 3.0V modes, controlled by the PBIAS register. - * - * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which - * is most naturally TWL VSIM; those pins also use PBIAS. - * - * FIXME handle VMMC1A as needed ... 
- */ - if (power_on) { - if (cpu_is_omap2430()) { - reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1); - if ((1 << vdd) >= MMC_VDD_30_31) - reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE; - else - reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE; - omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1); - } - - if (mmc->internal_clock) { - reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); - reg |= OMAP2_MMCSDIO1ADPCLKISEL; - omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0); - } - - reg = omap_ctrl_readl(control_pbias_offset); - if (cpu_is_omap3630()) { - /* Set MMC I/O to 52MHz */ - prog_io = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1); - prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL; - omap_ctrl_writel(prog_io, OMAP343X_CONTROL_PROG_IO1); - } else { - reg |= OMAP2_PBIASSPEEDCTRL0; - } - reg &= ~OMAP2_PBIASLITEPWRDNZ0; - omap_ctrl_writel(reg, control_pbias_offset); - } else { - reg = omap_ctrl_readl(control_pbias_offset); - reg &= ~OMAP2_PBIASLITEPWRDNZ0; - omap_ctrl_writel(reg, control_pbias_offset); - } -} - -static void omap_hsmmc1_after_set_reg(struct device *dev, int power_on, int vdd) -{ - u32 reg; - - /* 100ms delay required for PBIAS configuration */ - msleep(100); - - if (power_on) { - reg = omap_ctrl_readl(control_pbias_offset); - reg |= (OMAP2_PBIASLITEPWRDNZ0 | OMAP2_PBIASSPEEDCTRL0); - if ((1 << vdd) <= MMC_VDD_165_195) - reg &= ~OMAP2_PBIASLITEVMODE0; - else - reg |= OMAP2_PBIASLITEVMODE0; - omap_ctrl_writel(reg, control_pbias_offset); - } else { - reg = omap_ctrl_readl(control_pbias_offset); - reg |= (OMAP2_PBIASSPEEDCTRL0 | OMAP2_PBIASLITEPWRDNZ0 | - OMAP2_PBIASLITEVMODE0); - omap_ctrl_writel(reg, control_pbias_offset); - } -} - -static void hsmmc2_select_input_clk_src(struct omap_hsmmc_platform_data *mmc) -{ - u32 reg; - - reg = omap_ctrl_readl(control_devconf1_offset); - if (mmc->internal_clock) - reg |= OMAP2_MMCSDIO2ADPCLKISEL; - else - reg &= ~OMAP2_MMCSDIO2ADPCLKISEL; - omap_ctrl_writel(reg, control_devconf1_offset); -} - -static void hsmmc2_before_set_reg(struct device *dev, int power_on, int vdd) -{ - struct omap_hsmmc_platform_data *mmc = dev->platform_data; - - if (mmc->remux) - mmc->remux(dev, power_on); - - if (power_on) - hsmmc2_select_input_clk_src(mmc); -} - -static int am35x_hsmmc2_set_power(struct device *dev, int power_on, int vdd) -{ - struct omap_hsmmc_platform_data *mmc = dev->platform_data; - - if (power_on) - hsmmc2_select_input_clk_src(mmc); - - return 0; -} - -static int nop_mmc_set_power(struct device *dev, int power_on, int vdd) -{ - return 0; -} - static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, struct omap_hsmmc_platform_data *mmc) { @@ -157,101 +43,11 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, return -ENOMEM; } - if (c->name) - strncpy(hc_name, c->name, HSMMC_NAME_LEN); - else - snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", - c->mmc, 1); + snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1); mmc->name = hc_name; mmc->caps = c->caps; - mmc->internal_clock = !c->ext_clock; mmc->reg_offset = 0; - if (c->cover_only) { - /* detect if mobile phone cover removed */ - mmc->gpio_cd = -EINVAL; - mmc->gpio_cod = c->gpio_cd; - } else { - /* card detect pin on the mmc socket itself */ - mmc->gpio_cd = c->gpio_cd; - mmc->gpio_cod = -EINVAL; - } - mmc->gpio_wp = c->gpio_wp; - - mmc->remux = c->remux; - mmc->init_card = c->init_card; - - if (c->nonremovable) - mmc->nonremovable = 1; - - /* - * NOTE: MMC slots should have a Vcc regulator set up. 
- * This may be from a TWL4030-family chip, another - * controllable regulator, or a fixed supply. - * - * temporary HACK: ocr_mask instead of fixed supply - */ - if (soc_is_am35xx()) - mmc->ocr_mask = MMC_VDD_165_195 | - MMC_VDD_26_27 | - MMC_VDD_27_28 | - MMC_VDD_29_30 | - MMC_VDD_30_31 | - MMC_VDD_31_32; - else - mmc->ocr_mask = c->ocr_mask; - - if (!soc_is_am35xx()) - mmc->features |= HSMMC_HAS_PBIAS; - - switch (c->mmc) { - case 1: - if (mmc->features & HSMMC_HAS_PBIAS) { - /* on-chip level shifting via PBIAS0/PBIAS1 */ - mmc->before_set_reg = - omap_hsmmc1_before_set_reg; - mmc->after_set_reg = - omap_hsmmc1_after_set_reg; - } - - if (soc_is_am35xx()) - mmc->set_power = nop_mmc_set_power; - - /* OMAP3630 HSMMC1 supports only 4-bit */ - if (cpu_is_omap3630() && - (c->caps & MMC_CAP_8_BIT_DATA)) { - c->caps &= ~MMC_CAP_8_BIT_DATA; - c->caps |= MMC_CAP_4_BIT_DATA; - mmc->caps = c->caps; - } - break; - case 2: - if (soc_is_am35xx()) - mmc->set_power = am35x_hsmmc2_set_power; - - if (c->ext_clock) - c->transceiver = 1; - if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) { - c->caps &= ~MMC_CAP_8_BIT_DATA; - c->caps |= MMC_CAP_4_BIT_DATA; - } - if (mmc->features & HSMMC_HAS_PBIAS) { - /* off-chip level shifting, or none */ - mmc->before_set_reg = hsmmc2_before_set_reg; - mmc->after_set_reg = NULL; - } - break; - case 3: - case 4: - case 5: - mmc->before_set_reg = NULL; - mmc->after_set_reg = NULL; - break; - default: - pr_err("MMC%d configuration not supported!\n", c->mmc); - kfree(hc_name); - return -ENODEV; - } return 0; } @@ -260,7 +56,6 @@ static int omap_hsmmc_done; void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) { struct platform_device *pdev; - struct omap_hsmmc_platform_data *mmc_pdata; int res; if (omap_hsmmc_done != 1) @@ -269,32 +64,12 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) omap_hsmmc_done++; for (; c->mmc; c++) { - if (!c->deferred) - continue; - pdev = c->pdev; if (!pdev) continue; - - mmc_pdata = pdev->dev.platform_data; - if (!mmc_pdata) - continue; - - if (c->cover_only) { - /* detect if mobile phone cover removed */ - mmc_pdata->gpio_cd = -EINVAL; - mmc_pdata->gpio_cod = c->gpio_cd; - } else { - /* card detect pin on the mmc socket itself */ - mmc_pdata->gpio_cd = c->gpio_cd; - mmc_pdata->gpio_cod = -EINVAL; - } - mmc_pdata->gpio_wp = c->gpio_wp; - res = omap_device_register(pdev); if (res) - pr_err("Could not late init MMC %s\n", - c->name); + pr_err("Could not late init MMC\n"); } } @@ -336,13 +111,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo, if (oh->dev_attr != NULL) { mmc_dev_attr = oh->dev_attr; mmc_data->controller_flags = mmc_dev_attr->flags; - /* - * erratum 2.1.1.128 doesn't apply if board has - * a transceiver is attached - */ - if (hsmmcinfo->transceiver) - mmc_data->controller_flags &= - ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ; } pdev = platform_device_alloc(name, ctrl_nr - 1); @@ -367,9 +135,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo, hsmmcinfo->pdev = pdev; - if (hsmmcinfo->deferred) - goto free_mmc; - res = omap_device_register(pdev); if (res) { pr_err("Could not register od for %s\n", name); diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h index 69b619ddc7650c7eac714898b0219a925005a7b5..af9af5094ec33811e0fa5fba562e87a51e7568ff 100644 --- a/arch/arm/mach-omap2/hsmmc.h +++ b/arch/arm/mach-omap2/hsmmc.h @@ -12,18 +12,9 @@ struct omap2_hsmmc_info { u8 mmc; /* controller 1/2/3 */ u32 caps; /* 4/8 wires and any additional host * capabilities OR'd 
(ref. linux/mmc/host.h) */ - bool transceiver; /* MMC-2 option */ - bool ext_clock; /* use external pin for input clock */ - bool cover_only; /* No card detect - just cover switch */ - bool nonremovable; /* Nonremovable e.g. eMMC */ - bool deferred; /* mmc needs a deferred probe */ int gpio_cd; /* or -EINVAL */ int gpio_wp; /* or -EINVAL */ - char *name; /* or NULL for default */ struct platform_device *pdev; /* mmc controller instance */ - int ocr_mask; /* temporary HACK */ - /* Remux (pad configuration) when powering on/off */ - void (*remux)(struct device *dev, int power_on); /* init some special card */ void (*init_card)(struct mmc_card *card); }; diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 1d739d1a0a657aec9ee73450154ff5b5197f8939..1cd20e4d56b06fe2afb4ce7d79bc310446e07762 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -410,7 +410,7 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data) return omap_hwmod_set_postsetup_state(oh, *(u8 *)data); } -static void __init omap_hwmod_init_postsetup(void) +static void __init __maybe_unused omap_hwmod_init_postsetup(void) { u8 postsetup_state; diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index d44e0e2f11063134e9dd031c0a55b523dd76befa..841ba19d64a69b153a38ef594220d1a4054d7e59 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -486,7 +486,6 @@ int __init omap3_pm_init(void) ret = request_irq(omap_prcm_event_to_irq("io"), _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", omap3_pm_init); - enable_irq(omap_prcm_event_to_irq("io")); if (ret) { pr_err("pm: Failed to request pm_io irq\n"); diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 382e236fbfd9a188a91aba9e731436dfca86dd02..64f6451499a795de85fd451903de1d9aef59ba7a 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -692,7 +692,6 @@ static int omap3xxx_prm_late_init(void) { struct device_node *np; int irq_num; - int ret; if (!(prm_features & PRM_HAS_IO_WAKEUP)) return 0; @@ -712,12 +711,8 @@ static int omap3xxx_prm_late_init(void) } omap3xxx_prm_enable_io_wakeup(); - ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); - if (!ret) - irq_set_status_flags(omap_prcm_event_to_irq("io"), - IRQ_NOAUTOEN); - return ret; + return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); } static void __exit omap3xxx_prm_exit(void) diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 87e86a4a9eadd7f6a544fb04e228a791268e6d73..3ab5df1ce900b26f91b8b582a36188bd281c6c3c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -336,6 +336,27 @@ static void omap44xx_prm_reconfigure_io_chain(void) return; } +/** + * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches + * + * Activates the I/O wakeup event latches and allows events logged by + * those latches to signal a wakeup event to the PRCM. For I/O wakeups + * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and + * omap44xx_prm_reconfigure_io_chain() must be called. No return value. 
+ */ +static void __init omap44xx_prm_enable_io_wakeup(void) +{ + s32 inst = omap4_prmst_get_prm_dev_inst(); + + if (inst == PRM_INSTANCE_UNKNOWN) + return; + + omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK, + OMAP4430_GLOBAL_WUEN_MASK, + inst, + omap4_prcm_irq_setup.pm_ctrl); +} + /** * omap44xx_prm_read_reset_sources - return the last SoC reset source * @@ -668,6 +689,8 @@ struct pwrdm_ops omap4_pwrdm_operations = { .pwrdm_has_voltdm = omap4_check_vcvp, }; +static int omap44xx_prm_late_init(void); + /* * XXX document */ @@ -675,6 +698,7 @@ static struct prm_ll_data omap44xx_prm_ll_data = { .read_reset_sources = &omap44xx_prm_read_reset_sources, .was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old, .clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old, + .late_init = &omap44xx_prm_late_init, .assert_hardreset = omap4_prminst_assert_hardreset, .deassert_hardreset = omap4_prminst_deassert_hardreset, .is_hardreset_asserted = omap4_prminst_is_hardreset_asserted, @@ -711,6 +735,37 @@ int __init omap44xx_prm_init(const struct omap_prcm_init_data *data) return prm_register(&omap44xx_prm_ll_data); } +static int omap44xx_prm_late_init(void) +{ + int irq_num; + + if (!(prm_features & PRM_HAS_IO_WAKEUP)) + return 0; + + irq_num = of_irq_get(prm_init_data->np, 0); + /* + * Already have OMAP4 IRQ num. For all other platforms, we need + * IRQ numbers from DT + */ + if (irq_num < 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) { + if (irq_num == -EPROBE_DEFER) + return irq_num; + + /* Have nothing to do */ + return 0; + } + + /* Once OMAP4 DT is filled as well */ + if (irq_num >= 0) { + omap4_prcm_irq_setup.irq = irq_num; + omap4_prcm_irq_setup.xlate_irq = NULL; + } + + omap44xx_prm_enable_io_wakeup(); + + return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup); +} + static void __exit omap44xx_prm_exit(void) { prm_unregister(&omap44xx_prm_ll_data); diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 8cadb302a7d2f54a3bbcddaf7296606293d69e4e..ffe05c27087e88340c40f3a302f87aae19e1d81e 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c @@ -15,7 +15,7 @@ #include #include "common.h" -static void __init sirfsoc_init_late(void) +static void __init __maybe_unused sirfsoc_init_late(void) { sirfsoc_pm_init(); } diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 76fbc115ec33f7bc620e462a01f80fd915e864c1..ce7d97babb0f9428803df53cae836e437ed08118 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -566,6 +566,7 @@ config MACH_ICONTROL config ARCH_PXA_ESERIES bool "PXA based Toshiba e-series PDAs" select FB_W100 + select FB select PXA25x config MACH_E330 diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h index 990d2bf2fb45e6451226afe25a4699a14ff5f53a..9bf4ea6a6f7446b830a6bfadf5156ff9cf5aed0c 100644 --- a/arch/arm/mach-pxa/include/mach/mtd-xip.h +++ b/arch/arm/mach-pxa/include/mach/mtd-xip.h @@ -17,11 +17,15 @@ #include -#define xip_irqpending() (ICIP & ICMR) +/* restored July 2017, this did not build since 2011! 
*/ + +#define ICIP io_p2v(0x40d00000) +#define ICMR io_p2v(0x40d00004) +#define xip_irqpending() (readl(ICIP) & readl(ICMR)) /* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */ -#define xip_currtime() (OSCR) -#define xip_elapsed_since(x) (signed)((OSCR - (x)) / 4) +#define xip_currtime() readl(OSCR) +#define xip_elapsed_since(x) (signed)((readl(OSCR) - (x)) / 4) /* * xip_cpu_idle() is used when waiting for a delay equal or larger than diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h index aa79fa47373ac8d3e605592cbe40e574cc9b3966..622d4e5df0293b48c697bb355a52a11613e7a629 100644 --- a/arch/arm/mach-rpc/include/mach/hardware.h +++ b/arch/arm/mach-rpc/include/mach/hardware.h @@ -25,8 +25,8 @@ * *_SIZE is the size of the region * *_BASE is the virtual address */ -#define RAM_SIZE 0x10000000 -#define RAM_START 0x10000000 +#define RPC_RAM_SIZE 0x10000000 +#define RPC_RAM_START 0x10000000 #define EASI_SIZE 0x08000000 /* EASI I/O */ #define EASI_START 0x08000000 diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c index 0db46895c82a4729d40b6c94c62482cd00ae1c9b..7d52cd97d96e4091adbcff105f9a0dc8db86c83e 100644 --- a/arch/arm/mach-sa1100/clock.c +++ b/arch/arm/mach-sa1100/clock.c @@ -35,6 +35,31 @@ struct clk clk_##_name = { \ static DEFINE_SPINLOCK(clocks_lock); +/* Dummy clk routine to build generic kernel parts that may be using them */ +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return clk_get_rate(clk); +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + return NULL; +} +EXPORT_SYMBOL(clk_get_parent); + static void clk_gpio27_enable(struct clk *clk) { /* diff --git a/arch/arm/mach-sa1100/include/mach/mtd-xip.h b/arch/arm/mach-sa1100/include/mach/mtd-xip.h index b3d684098fbf545c033e56793935cee7da3e2bf0..cb76096a2e36b4ea4ccf02f983244a97919ff243 100644 --- a/arch/arm/mach-sa1100/include/mach/mtd-xip.h +++ b/arch/arm/mach-sa1100/include/mach/mtd-xip.h @@ -20,7 +20,7 @@ #define xip_irqpending() (ICIP & ICMR) /* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */ -#define xip_currtime() (OSCR) -#define xip_elapsed_since(x) (signed)((OSCR - (x)) / 4) +#define xip_currtime() readl_relaxed(OSCR) +#define xip_elapsed_since(x) (signed)((readl_relaxed(OSCR) - (x)) / 4) #endif /* __ARCH_SA1100_MTD_XIP_H__ */ diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index 73e3adbc133096eca9cdf652d37ff110f5e35850..44438f344dc80f9c5073502fbe2019270d8df24a 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -67,8 +67,12 @@ static int regulator_quirk_notify(struct notifier_block *nb, { struct device *dev = data; struct i2c_client *client; + static bool done; u32 mon; + if (done) + return 0; + mon = ioread32(irqc + IRQC_MONITOR); dev_dbg(dev, "%s: %ld, IRQC_MONITOR = 0x%x\n", __func__, action, mon); if (mon & REGULATOR_IRQ_MASK) @@ -99,7 +103,7 @@ static int regulator_quirk_notify(struct notifier_block *nb, remove: dev_info(dev, "IRQ2 is not asserted, removing quirk\n"); - bus_unregister_notifier(&i2c_bus_type, nb); + done = true; iounmap(irqc); return 0; } diff --git a/arch/arm/mach-ux500/cpu-db8500.c 
b/arch/arm/mach-ux500/cpu-db8500.c index 28083ef728195816b440b4076d0f1d4a69aa3092..71a34e8c345a5b19b98f42cc368f796118d3214f 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -133,6 +133,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler) static struct arm_pmu_platdata db8500_pmu_platdata = { .handle_irq = db8500_pmu_handler, + .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD, }; static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { diff --git a/arch/arm/mach-w90x900/clock.c b/arch/arm/mach-w90x900/clock.c index ac6fd1a2cb59fb43897d00581d18e53aa354ca80..3f93fac98d973dc9a952d40a98f33b9343545e8e 100644 --- a/arch/arm/mach-w90x900/clock.c +++ b/arch/arm/mach-w90x900/clock.c @@ -93,3 +93,32 @@ void nuc900_subclk_enable(struct clk *clk, int enable) __raw_writel(clken, W90X900_VA_CLKPWR + SUBCLK); } + +/* dummy functions, should not be called */ +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + WARN_ON(clk); + return 0; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + WARN_ON(clk); + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + WARN_ON(clk); + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + WARN_ON(clk); + return NULL; +} +EXPORT_SYMBOL(clk_get_parent); diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 90ee354d803e6ce6a9f06e6bf3513f439716ac35..6db5fc26d154c6febf6d61c0bdf66aae9c563f9e 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, { const struct dma_map_ops *ops = &dma_noop_ops; + void *ret; /* - * We are here because: + * Try generic allocator first if we are advertised that + * consistency is not required. + */ + + if (attrs & DMA_ATTR_NON_CONSISTENT) + return ops->alloc(dev, size, dma_handle, gfp, attrs); + + ret = dma_alloc_from_global_coherent(size, dma_handle); + + /* + * dma_alloc_from_global_coherent() may fail because: + * * - no consistent DMA region has been defined, so we can't * continue. * - there is no space left in consistent DMA region, so we @@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, * advertised that consistency is not required. 
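	 *
	 * A caller that can tolerate a non-consistent buffer would typically
	 * request one with something like (sketch only, hypothetical names):
	 *
	 *	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
	 *			      DMA_ATTR_NON_CONSISTENT);
	 *
	 * and perform any required cache maintenance itself.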
*/ - if (attrs & DMA_ATTR_NON_CONSISTENT) - return ops->alloc(dev, size, dma_handle, gfp, attrs); - - WARN_ON_ONCE(1); - return NULL; + WARN_ON_ONCE(ret == NULL); + return ret; } static void arm_nommu_dma_free(struct device *dev, size_t size, @@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size, { const struct dma_map_ops *ops = &dma_noop_ops; - if (attrs & DMA_ATTR_NON_CONSISTENT) + if (attrs & DMA_ATTR_NON_CONSISTENT) { ops->free(dev, size, cpu_addr, dma_addr, attrs); - else - WARN_ON_ONCE(1); + } else { + int ret = dma_release_from_global_coherent(get_order(size), + cpu_addr); + + WARN_ON_ONCE(ret == 0); + } return; } +static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + int ret; + + if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) + return ret; + + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + + static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { @@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist const struct dma_map_ops arm_nommu_dma_ops = { .alloc = arm_nommu_dma_alloc, .free = arm_nommu_dma_free, + .mmap = arm_nommu_dma_mmap, .map_page = arm_nommu_dma_map_page, .unmap_page = arm_nommu_dma_unmap_page, .map_sg = arm_nommu_dma_map_sg, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e7380bafbfa6d37ef928fecf38ec26c56df31f90..fcf1473d6fed5398be1f9474c2543958120e22a5 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long pfn = dma_to_pfn(dev, dma_addr); unsigned long off = vma->vm_pgoff; - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index 0d1f026d831aac7b7ecda69ac5ec0f57e693ec2b..ba2fde2909f949531bf4e63e25fe22856b0cec3b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -51,6 +51,7 @@ compatible = "sinovoip,bananapi-m64", "allwinner,sun50i-a64"; aliases { + ethernet0 = &emac; serial0 = &uart0; serial1 = &uart1; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index 08cda24ea194cbffd08fc8de50dc7a4d525e5019..827168bc22ed2c6f9b831e4b3b92d75980becf1d 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts @@ -51,6 +51,7 @@ compatible = "pine64,pine64", "allwinner,sun50i-a64"; aliases { + ethernet0 = &emac; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts index 17eb1cc5bf6b4061a8ea8cf40e54b4eb52c1ebe3..216e3a5dafaef892fc764a076a0e0e5bdfaf396e 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts @@ -53,6 +53,7 @@ "allwinner,sun50i-a64"; aliases { + ethernet0 = &emac; serial0 = &uart0; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi 
b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 9d00622ce8453820441e9c3c26980cab35c3dcd8..bd0f33b77f5728781f558414eebf1eb02055a78d 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -452,7 +452,7 @@ emac: ethernet@1c30000 { compatible = "allwinner,sun50i-a64-emac"; syscon = <&syscon>; - reg = <0x01c30000 0x100>; + reg = <0x01c30000 0x10000>; interrupts = ; interrupt-names = "macirq"; resets = <&ccu RST_BUS_EMAC>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index 732e2e06f503c83e42c0c4925b06295e6e47dc64..d9a720bff05d39ae93941140a3a8298f0bfa6570 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi @@ -120,5 +120,8 @@ }; &pio { + interrupts = , + , + ; compatible = "allwinner,sun50i-h5-pinctrl"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 35b8c88c3220c84c76ce1b22c8440ad8fdcbaa10..738ed689ff692b0f16b9add648f9f32dd514d010 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -400,7 +400,7 @@ }; pwm_AO_ab: pwm@550 { - compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm"; + compatible = "amlogic,meson-gx-ao-pwm", "amlogic,meson-gxbb-ao-pwm"; reg = <0x0 0x00550 0x0 0x10>; #pwm-cells = <3>; status = "disabled"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts index 72c5a9f64ca8499fe83a11bdbb17c1c95c4206c6..94567eb178759c18162c276baa4ba481f903c064 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts @@ -109,8 +109,8 @@ status = "okay"; pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>; pinctrl-names = "default"; - clocks = <&clkc CLKID_FCLK_DIV4>; - clock-names = "clkin0"; + clocks = <&xtal> , <&xtal>; + clock-names = "clkin0", "clkin1" ; }; &pwm_ef { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 890821d6e52b2d01a00e06d03ded2b49e091857a..266fbcf3e47f5640b565c09fae289003b7b9e679 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -10,12 +10,20 @@ #include -#include "meson-gxl-s905x-p212.dtsi" +#include "meson-gxl-s905x.dtsi" / { compatible = "libretech,cc", "amlogic,s905x", "amlogic,meson-gxl"; model = "Libre Technology CC"; + aliases { + serial0 = &uart_AO; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + cvbs-connector { compatible = "composite-video-connector"; @@ -26,6 +34,11 @@ }; }; + emmc_pwrseq: emmc-pwrseq { + compatible = "mmc-pwrseq-emmc"; + reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>; + }; + hdmi-connector { compatible = "hdmi-connector"; type = "a"; @@ -53,6 +66,39 @@ linux,default-trigger = "heartbeat"; }; }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x80000000>; + }; + + vcc_3v3: regulator-vcc_3v3 { + compatible = "regulator-fixed"; + regulator-name = "VCC_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + vcc_card: regulator-vcc-card { + compatible = "regulator-gpio"; + + regulator-name = "VCC_CARD"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + + gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>; + gpios-states = <0>; + + 
states = <3300000 0>, + <1800000 1>; + }; + + vddio_boot: regulator-vddio_boot { + compatible = "regulator-fixed"; + regulator-name = "VDDIO_BOOT"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; }; &cvbs_vdac_port { @@ -61,6 +107,16 @@ }; }; +ðmac { + status = "okay"; +}; + +&ir { + status = "okay"; + pinctrl-0 = <&remote_input_ao_pins>; + pinctrl-names = "default"; +}; + &hdmi_tx { status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; @@ -73,20 +129,43 @@ }; }; -/* - * The following devices exists but are exposed on the general - * purpose GPIO header. End user may well decide to use those pins - * for another purpose - */ +/* SD card */ +&sd_emmc_b { + status = "okay"; + pinctrl-0 = <&sdcard_pins>; + pinctrl-names = "default"; + + bus-width = <4>; + cap-sd-highspeed; + max-frequency = <100000000>; + disable-wp; + + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; + cd-inverted; -&sd_emmc_a { - status = "disabled"; + vmmc-supply = <&vcc_3v3>; + vqmmc-supply = <&vcc_card>; }; -&uart_A { - status = "disabled"; +/* eMMC */ +&sd_emmc_c { + status = "okay"; + pinctrl-0 = <&emmc_pins>; + pinctrl-names = "default"; + + bus-width = <8>; + cap-mmc-highspeed; + max-frequency = <50000000>; + non-removable; + disable-wp; + + mmc-pwrseq = <&emmc_pwrseq>; + vmmc-supply = <&vcc_3v3>; + vqmmc-supply = <&vddio_boot>; }; -&wifi32k { - status = "disabled"; +&uart_AO { + status = "okay"; + pinctrl-0 = <&uart_ao_a_pins>; + pinctrl-names = "default"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index dbcc3d4e2ed523e72bc8cd68dbf2c52503b7332f..51763d674050cb27c32b0909c62a62724d5d429d 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -219,7 +219,7 @@ reg = <0x18800 0x100>, <0x18C00 0x20>; gpiosb: gpio { #gpio-cells = <2>; - gpio-ranges = <&pinctrl_sb 0 0 29>; + gpio-ranges = <&pinctrl_sb 0 0 30>; gpio-controller; interrupts = , diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index 726528ce54e9650fe2b0ebfc085b0f0753e876d8..4c68605675a83db1639c0efed23788379c4d9e17 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi @@ -270,6 +270,7 @@ interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", "eip"; clocks = <&cpm_clk 1 26>; + dma-coherent; }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 95f8e5f607f608d2ec1bd258e1eec76f0d4b8528..923f354b02f00d199db276f60adffb013d688186 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -64,7 +64,7 @@ compatible = "marvell,armada-8k-rtc"; reg = <0x284000 0x20>, <0x284080 0x24>; reg-names = "rtc", "rtc-soc"; - interrupts = ; + interrupts = ; }; cps_ethernet: ethernet@0 { @@ -261,6 +261,7 @@ interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", "eip"; clocks = <&cps_clk 1 26>; + dma-coherent; /* * The cryptographic engine found on the cp110 * master is enabled by default at the SoC diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index aef35e0b685a3a8599f33d1b227f31d0e5f551ea..f903957da504a9e02fa910243b60388dc85ec959 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -45,7 +45,7 @@ 
stdout-path = "serial0:115200n8"; }; - audio_clkout: audio_clkout { + audio_clkout: audio-clkout { /* * This is same as <&rcar_sound 0> * but needed to avoid cs2000/rcar_sound probe dead-lock @@ -508,7 +508,7 @@ /* audio_clkout0/1/2/3 */ #clock-cells = <1>; - clock-frequency = <11289600 12288000>; + clock-frequency = <12288000 11289600>; status = "okay"; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index b5c6ee07d7f91d84d4b5b8e612fcc4f4304bc4c2..d1a3f3b7a0ab0b97aff29a898813a023a7d5bd3d 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -281,7 +281,7 @@ /* audio_clkout0/1/2/3 */ #clock-cells = <1>; - clock-frequency = <11289600 12288000>; + clock-frequency = <12288000 11289600>; status = "okay"; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 6c7d147eed54de4cc19678ce8f318987406b0521..b4ca115b3be1c0a25121517e1e9e598d26c7c0b1 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -476,6 +476,7 @@ CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_MSM_GCC_8916=y CONFIG_MSM_GCC_8994=y CONFIG_MSM_MMCC_8996=y +CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_MHU=y CONFIG_PLATFORM_MHU=y diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 74d08e44a651b9e58de7d8a6075f6101083f5262..a652ce0a5cb2c33178ac2e447fad1159efb0c6a9 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -65,13 +65,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, u64 _val; \ if (needs_unstable_timer_counter_workaround()) { \ const struct arch_timer_erratum_workaround *wa; \ - preempt_disable(); \ + preempt_disable_notrace(); \ wa = __this_cpu_read(timer_unstable_counter_workaround); \ if (wa && wa->read_##reg) \ _val = wa->read_##reg(); \ else \ _val = read_sysreg(reg); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } else { \ _val = read_sysreg(reg); \ } \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 99fa69c9c3cf3ebf080c7443fb16ac6c11cf234a..9ef0797380cbbdf182a86e934c2eec5aa97d889d 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) " sub x30, x30, %[ret]\n" " cbnz x30, 1b\n" "2:") - : [ret] "+&r" (x0), [v] "+Q" (v->counter) + : [ret] "+r" (x0), [v] "+Q" (v->counter) : : __LL_SC_CLOBBERS, "cc", "memory"); diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index acae781f7359ece14f713e74e168deb6e47dd68b..3288c2b3673149b728ea52ee87bc65ba723c9811 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -114,10 +114,10 @@ /* * This is the base location for PIE (ET_DYN with INTERP) loads. On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE 0x100000000UL +#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 32f82723338a0e23bd880273aa9ae53e93a9979d..ef39dcb9ca6af1150152883cf338114536167b5f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -64,8 +64,10 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. 
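 *
 * For example, with VA_BITS == 48 the definitions below give
 * VA_START == 0xffff000000000000 and PAGE_OFFSET == 0xffff800000000000,
 * i.e. the same values as the old shift-based expressions.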
*/ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define VA_START (UL(0xffffffffffffffff) << VA_BITS) -#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) +#define VA_START (UL(0xffffffffffffffff) - \ + (UL(1) << VA_BITS) + 1) +#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ + (UL(1) << (VA_BITS - 1)) + 1) #define KIMAGE_VADDR (MODULES_END) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 16e44fa9b3b61b0bec9cc8ed92874310cd094176..248339e4aaf5a7a0b6bc09bc2184654bbda72845 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -492,7 +492,7 @@ asm( * the "%x0" template means XZR. */ #define write_sysreg(v, r) do { \ - u64 __val = (u64)v; \ + u64 __val = (u64)(v); \ asm volatile("msr " __stringify(r) ", %x0" \ : : "rZ" (__val)); \ } while (0) @@ -508,7 +508,7 @@ asm( }) #define write_sysreg_s(v, r) do { \ - u64 __val = (u64)v; \ + u64 __val = (u64)(v); \ asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ } while (0) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 8f0a1de11e4a6cd11fc7af0bd6db587538fab7be..fab46a0ea223d74781464a2a3904a4d2eac0a423 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs) */ #define __range_ok(addr, size) \ ({ \ - unsigned long __addr = (unsigned long __force)(addr); \ + unsigned long __addr = (unsigned long)(addr); \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index e137ceaf5016b2d796e6703058d6f62209955059..d16978213c5b332b439205f7c582e1c9a2f65e4d 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu) * Don't warn spuriously. 
*/ if (cpu != 0) - pr_err("%s: missing enable-method property\n", - dn->full_name); + pr_err("%pOF: missing enable-method property\n", + dn); } } else { enable_method = acpi_get_enable_method(cpu); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 321119881abfedf93b4df127cb4f8bc74764e20d..dc66e6ec3a99dfe9e91d83f1cde2cec8ff225863 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn) */ cell = of_get_property(dn, "reg", NULL); if (!cell) { - pr_err("%s: missing reg property\n", dn->full_name); + pr_err("%pOF: missing reg property\n", dn); return INVALID_HWID; } @@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn) * Non affinity bits must be set to 0 in the DT */ if (hwid & ~MPIDR_HWID_BITMASK) { - pr_err("%s: invalid reg property\n", dn->full_name); + pr_err("%pOF: invalid reg property\n", dn); return INVALID_HWID; } return hwid; @@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void) goto next; if (is_mpidr_duplicate(cpu_count, hwid)) { - pr_err("%s: duplicate cpu reg properties in the DT\n", - dn->full_name); + pr_err("%pOF: duplicate cpu reg properties in the DT\n", + dn); goto next; } @@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void) */ if (hwid == cpu_logical_map(0)) { if (bootcpu_valid) { - pr_err("%s: duplicate boot cpu reg property in DT\n", - dn->full_name); + pr_err("%pOF: duplicate boot cpu reg property in DT\n", + dn); goto next; } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 79244c75eaec4fb6f13d3b19a9fd1b8e20346108..8d48b233e6ce5d09cd84db6a21a52f4d0dd68e97 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node) } } - pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name); + pr_crit("Unable to find CPU node for %pOF\n", cpu_node); of_node_put(cpu_node); return -1; @@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id, cpu_topology[cpu].core_id = core_id; cpu_topology[cpu].thread_id = i; } else { - pr_err("%s: Can't get CPU for thread\n", - t->full_name); + pr_err("%pOF: Can't get CPU for thread\n", + t); of_node_put(t); return -EINVAL; } @@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id, cpu = get_cpu_for_node(core); if (cpu >= 0) { if (!leaf) { - pr_err("%s: Core has both threads and CPU\n", - core->full_name); + pr_err("%pOF: Core has both threads and CPU\n", + core); return -EINVAL; } cpu_topology[cpu].cluster_id = cluster_id; cpu_topology[cpu].core_id = core_id; } else if (leaf) { - pr_err("%s: Can't get CPU for leaf core\n", core->full_name); + pr_err("%pOF: Can't get CPU for leaf core\n", core); return -EINVAL; } @@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth) has_cores = true; if (depth == 0) { - pr_err("%s: cpu-map children should be clusters\n", - c->full_name); + pr_err("%pOF: cpu-map children should be clusters\n", + c); of_node_put(c); return -EINVAL; } @@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth) if (leaf) { ret = parse_core(c, cluster_id, core_id++); } else { - pr_err("%s: Non-leaf cluster with core %s\n", - cluster->full_name, name); + pr_err("%pOF: Non-leaf cluster with core %s\n", + cluster, name); ret = -EINVAL; } @@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, 
int depth) } while (c); if (leaf && !has_cores) - pr_warn("%s: empty cluster\n", cluster->full_name); + pr_warn("%pOF: empty cluster\n", cluster); if (leaf) cluster_id++; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c7c7088097be0c1b8e80d594c3d02f715c03abfa..8a62648848e5754a747649078206e782ef550172 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock); void die(const char *str, struct pt_regs *regs, int err) { int ret; + unsigned long flags; + + raw_spin_lock_irqsave(&die_lock, flags); oops_enter(); - raw_spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); ret = __die(str, err, regs); @@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err) bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - raw_spin_unlock_irq(&die_lock); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); + + raw_spin_unlock_irqrestore(&die_lock, flags); + if (ret != NOTIFY_STOP) do_exit(SIGSEGV); } @@ -519,7 +523,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) { int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; - pt_regs_write_reg(regs, rt, read_sysreg(cntfrq_el0)); + pt_regs_write_reg(regs, rt, arch_timer_get_rate()); regs->pc += 4; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 77862881ae860943b2f2553dbbd93c65b64102ab..2e070d3baf9f12db1363ed95d079b19b02ca0382 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -764,7 +764,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (p->is_write) { if (r->CRm & 0x2) /* accessing PMOVSSET_EL0 */ - kvm_pmu_overflow_set(vcpu, p->regval & mask); + vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); else /* accessing PMOVSCLR_EL0 */ vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index c3cd65e3181414bcaf3ec5b50937f95bf895570b..076c43715e64afe7fcc12eabd753d355ca94558f 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -30,9 +30,10 @@ */ ENTRY(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH - # Prefetch two cache lines ahead. - prfm pldl1strm, [x1, #128] - prfm pldl1strm, [x1, #256] + // Prefetch three cache lines ahead. 
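+	// (Each iteration of the copy loop below moves 128 bytes, so these
+	// prefetches stay roughly three iterations ahead of the copy on
+	// CPUs without a hardware prefetcher.)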
+ prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] alternative_else_nop_endif ldp x2, x3, [x1] @@ -50,7 +51,7 @@ alternative_else_nop_endif subs x18, x18, #128 alternative_if ARM64_HAS_NO_HW_PREFETCH - prfm pldl1strm, [x1, #384] + prfm pldl1strm, [x1, #384] alternative_else_nop_endif stnp x2, x3, [x0] diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index e90cd1db42a809ebbb978cc9ae4674c0944d265f..f27d4dd043848dc8775a52588cffa7e27bd006a6 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev, vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, is_device_dma_coherent(dev)); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; return __swiotlb_mmap_pfn(vma, pfn, size); @@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, is_device_dma_coherent(dev)); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c7861c9864e69e7b6dc7efc16083063bbab99e98..2509e4fe699225675f74876032e22b24b338a3b1 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -163,26 +163,27 @@ int ptep_set_access_flags(struct vm_area_struct *vma, /* only preserve the access flags and write permission */ pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY; - /* - * PTE_RDONLY is cleared by default in the asm below, so set it in - * back if necessary (read-only or clean PTE). - */ + /* set PTE_RDONLY if actual read-only or clean PTE */ if (!pte_write(entry) || !pte_sw_dirty(entry)) pte_val(entry) |= PTE_RDONLY; /* * Setting the flags must be done atomically to avoid racing with the - * hardware update of the access/dirty state. + * hardware update of the access/dirty state. The PTE_RDONLY bit must + * be set to the most permissive (lowest value) of *ptep and entry + * (calculated as: a & b == ~(~a | ~b)). 
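+	 * For example, if *ptep has PTE_RDONLY set (clean or read-only) and
+	 * entry has it clear (writable and dirty), the eor/orr/eor sequence
+	 * below leaves PTE_RDONLY clear: ~(~1 | ~0) == 1 & 0 == 0.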
*/ + pte_val(entry) ^= PTE_RDONLY; asm volatile("// ptep_set_access_flags\n" " prfm pstl1strm, %2\n" "1: ldxr %0, %2\n" - " and %0, %0, %3 // clear PTE_RDONLY\n" + " eor %0, %0, %3 // negate PTE_RDONLY in *ptep\n" " orr %0, %0, %4 // set flags\n" + " eor %0, %0, %3 // negate final PTE_RDONLY\n" " stxr %w1, %0, %2\n" " cbnz %w1, 1b\n" : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)) - : "L" (~PTE_RDONLY), "r" (pte_val(entry))); + : "L" (PTE_RDONLY), "r" (pte_val(entry))); flush_tlb_fix_spurious_fault(vma, address); return 1; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 23c2d89a362e4423f62c732b290015d23211f38e..f1eb15e0e8642d2a74c7d19d18e8e8d3ae5a7062 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -496,7 +496,7 @@ void mark_rodata_ro(void) static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, pgprot_t prot, struct vm_struct *vma, - int flags) + int flags, unsigned long vm_flags) { phys_addr_t pa_start = __pa_symbol(va_start); unsigned long size = va_end - va_start; @@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, early_pgtable_alloc, flags); + if (!(vm_flags & VM_NO_GUARD)) + size += PAGE_SIZE; + vma->addr = va_start; vma->phys_addr = pa_start; vma->size = size; - vma->flags = VM_MAP; + vma->flags = VM_MAP | vm_flags; vma->caller = __builtin_return_address(0); vm_area_add_early(vma); @@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd) * Only rodata will be remapped with different permissions later on, * all other segments are allowed to use contiguous mappings. */ - map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0); + map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0, + VM_NO_GUARD); map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL, - &vmlinux_rodata, NO_CONT_MAPPINGS); + &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot, - &vmlinux_inittext, 0); + &vmlinux_inittext, 0, VM_NO_GUARD); map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL, - &vmlinux_initdata, 0); - map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0); + &vmlinux_initdata, 0, VM_NO_GUARD); + map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) { /* diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index b388a99fea7b783e5ab8edcad004f8a7457ee40e..dad128ba98bf8d827a1f7cc750f41c543a8f92f8 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) } node_set(nid, numa_nodes_parsed); - pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n", - start, (end - 1), nid); return ret; } @@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) void *nd; int tnid; - if (start_pfn < end_pfn) - pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid, - start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); - else + if (start_pfn >= end_pfn) pr_info("Initmem setup node %d []\n", nid); nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index fced197b96264e01b20743e90706ed20cf30b242..cbe5ac3699bf0f9dbdfd726c112f6fc6bd1271f0 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -168,7 +168,8 @@ 
static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end) { tlb->mm = mm; tlb->max = ARRAY_SIZE(tlb->local); @@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start * collected. */ static inline void -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force) { + if (force) + tlb->need_flush = 1; /* * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and * tlb->end_addr. diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8dd20358464f8efabcad2185d2b6f56c4fe805fa..48d91d5be4e9b6cf56c1cd0849830e2708967aa2 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB config MIPS_MT_SMP bool "MIPS MT SMP support (1 TC on each available VPE)" - depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select SYNC_R4K diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 04343625b9292807d886951eaed6866426dfc42d..bc2708c9ada40cf4206cd228d84a6ebf634161b3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms ifdef CONFIG_PHYSICAL_START load-y = $(CONFIG_PHYSICAL_START) endif -entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \ + +entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \ | grep "\bkernel_entry\b" | cut -f1 -d \ ) +ifdef CONFIG_CPU_MICROMIPS + # + # Set the ISA bit, since the kernel_entry symbol in the ELF will have it + # clear which would lead to images containing addresses which bootloaders may + # jump to as MIPS32 code. 
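+	# (The nested patsubst calls below just force the last hex digit of
+	# the address to be odd, i.e. they set bit 0; e.g. an entry point of
+	# 0x80100400 would become 0x80100401.)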
+ # + entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \ + $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \ + $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y))))))))) +else + entry-y = $(entry-noisa-y) +endif cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ebae133f1d000b096733e361e4a7a5003a6c9e67 --- /dev/null +++ b/arch/mips/boot/compressed/.gitignore @@ -0,0 +1,2 @@ +ashldi3.c +bswapsi.c diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index 542be1cd0f32c6cbe14cb0c3936ffe9cd11bd594..bfdfaf32d2c49742066329e800a6b535f363c3e1 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -13,9 +13,9 @@ #include #include #include +#include #include -#include /* USB Control Register */ union cvm_usbdrd_uctl_ctl { diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index 1910223a9c02ba1474929905b235e43a07501aaa..cea2bb1621e68b211d5dc99fdad78f208b1ad8a3 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S @@ -147,23 +147,12 @@ * Find irq with highest priority */ # open coded PTR_LA t1, cpu_mask_nr_tbl -#if (_MIPS_SZPTR == 32) +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, cpu_mask_nr_tbl lui t1, %hi(cpu_mask_nr_tbl) addiu t1, %lo(cpu_mask_nr_tbl) - -#endif -#if (_MIPS_SZPTR == 64) - # open coded dla t1, cpu_mask_nr_tbl - .set push - .set noat - lui t1, %highest(cpu_mask_nr_tbl) - lui AT, %hi(cpu_mask_nr_tbl) - daddiu t1, t1, %higher(cpu_mask_nr_tbl) - daddiu AT, AT, %lo(cpu_mask_nr_tbl) - dsll t1, 32 - daddu t1, t1, AT - .set pop +#else +#error GCC `-msym32' option required for 64-bit DECstation builds #endif 1: lw t2,(t1) nop @@ -214,23 +203,12 @@ * Find irq with highest priority */ # open coded PTR_LA t1,asic_mask_nr_tbl -#if (_MIPS_SZPTR == 32) +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, asic_mask_nr_tbl lui t1, %hi(asic_mask_nr_tbl) addiu t1, %lo(asic_mask_nr_tbl) - -#endif -#if (_MIPS_SZPTR == 64) - # open coded dla t1, asic_mask_nr_tbl - .set push - .set noat - lui t1, %highest(asic_mask_nr_tbl) - lui AT, %hi(asic_mask_nr_tbl) - daddiu t1, t1, %higher(asic_mask_nr_tbl) - daddiu AT, AT, %lo(asic_mask_nr_tbl) - dsll t1, 32 - daddu t1, t1, AT - .set pop +#else +#error GCC `-msym32' option required for 64-bit DECstation builds #endif 2: lw t2,(t1) nop diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index fc67947ed6586c2e416b2f6cc588a4445b3761ab..8b14c2706aa52ca83e07ff4dcb37c71850d133bc 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h @@ -9,6 +9,8 @@ #ifndef _ASM_CACHE_H #define _ASM_CACHE_H +#include + #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 8baa9033b181d2ca2e3f6e469315bdb8bf5345b5..721b698bfe3cf7e0274bb9f0bb4bf58bfecef29e 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -428,6 +428,9 @@ #ifndef cpu_scache_line_size #define cpu_scache_line_size() cpu_data[0].scache.linesz #endif +#ifndef cpu_tcache_line_size +#define cpu_tcache_line_size() cpu_data[0].tcache.linesz +#endif #ifndef cpu_hwrena_impl_bits #define cpu_hwrena_impl_bits 0 diff --git 
a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h index 9df1a53bcb36c60bc082223ff8daa24a7eb4a506..b4e7dfa214eb0b882022fe1ce2e8e7e7c3f0097d 100644 --- a/arch/mips/include/asm/mach-ralink/ralink_regs.h +++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h @@ -13,6 +13,8 @@ #ifndef _RALINK_REGS_H_ #define _RALINK_REGS_H_ +#include + enum ralink_soc_type { RALINK_UNKNOWN = 0, RT2880_SOC, diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h index d045973ddb336abbe9b54b21a65b4fb1a6c4f5fe..3ea84acf1814cb2c874ac17dafc73c64db50b5dc 100644 --- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h @@ -33,6 +33,10 @@ #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull)) #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) +#define CVMX_L2C_ERR_TDTX(block_id) \ + (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull) +#define CVMX_L2C_ERR_TTGX(block_id) \ + (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull) #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull)) @@ -66,9 +70,40 @@ ((offset) & 1) * 8) #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \ ((offset) & 31) * 8) -#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) +union cvmx_l2c_err_tdtx { + uint64_t u64; + struct cvmx_l2c_err_tdtx_s { + __BITFIELD_FIELD(uint64_t dbe:1, + __BITFIELD_FIELD(uint64_t sbe:1, + __BITFIELD_FIELD(uint64_t vdbe:1, + __BITFIELD_FIELD(uint64_t vsbe:1, + __BITFIELD_FIELD(uint64_t syn:10, + __BITFIELD_FIELD(uint64_t reserved_22_49:28, + __BITFIELD_FIELD(uint64_t wayidx:18, + __BITFIELD_FIELD(uint64_t reserved_2_3:2, + __BITFIELD_FIELD(uint64_t type:2, + ;))))))))) + } s; +}; + +union cvmx_l2c_err_ttgx { + uint64_t u64; + struct cvmx_l2c_err_ttgx_s { + __BITFIELD_FIELD(uint64_t dbe:1, + __BITFIELD_FIELD(uint64_t sbe:1, + __BITFIELD_FIELD(uint64_t noway:1, + __BITFIELD_FIELD(uint64_t reserved_56_60:5, + __BITFIELD_FIELD(uint64_t syn:6, + __BITFIELD_FIELD(uint64_t reserved_22_49:28, + __BITFIELD_FIELD(uint64_t wayidx:15, + __BITFIELD_FIELD(uint64_t reserved_2_6:5, + __BITFIELD_FIELD(uint64_t type:2, + ;))))))))) + } s; +}; + union cvmx_l2c_cfg { uint64_t u64; struct cvmx_l2c_cfg_s { diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h new file mode 100644 index 0000000000000000000000000000000000000000..a951ad5d65ad89dd71e84a7c1f959dc0a68b1a18 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h @@ -0,0 +1,60 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_L2D_DEFS_H__ +#define __CVMX_L2D_DEFS_H__ + +#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull)) +#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) + + +union cvmx_l2d_err { + uint64_t u64; + struct cvmx_l2d_err_s { + __BITFIELD_FIELD(uint64_t reserved_6_63:58, + __BITFIELD_FIELD(uint64_t bmhclsel:1, + __BITFIELD_FIELD(uint64_t ded_err:1, + __BITFIELD_FIELD(uint64_t sec_err:1, + __BITFIELD_FIELD(uint64_t ded_intena:1, + __BITFIELD_FIELD(uint64_t sec_intena:1, + __BITFIELD_FIELD(uint64_t ecc_ena:1, + ;))))))) + } s; +}; + +union cvmx_l2d_fus3 { + uint64_t u64; + struct cvmx_l2d_fus3_s { + __BITFIELD_FIELD(uint64_t reserved_40_63:24, + __BITFIELD_FIELD(uint64_t ema_ctl:3, + __BITFIELD_FIELD(uint64_t reserved_34_36:3, + __BITFIELD_FIELD(uint64_t q3fus:34, + ;)))) + } s; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 9742202f2a326c45c504de6d77b51eb523421c0c..e638735cc3ac56c84fc861f293e6f2217b7ba3cb 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -62,6 +62,7 @@ enum cvmx_mips_space { #include #include #include +#include #include #include #include diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 770d4d1516cbb1ff34cca30cdccbe6c1b0aeebdc..6bace7695788fbc3b7663aeb353a08a3c45503af 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -376,9 +376,6 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); - complete(&cpu_running); - synchronise_count_slave(cpu); - set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -386,6 +383,9 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); + complete(&cpu_running); + synchronise_count_slave(cpu); + /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. 
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index e08598c70b3e72d3c579462d9b82cbe340256ac9..8e78251eccc25e0a981ede0b89d1df907f9e896a 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, else vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 3f74f6c1f065fb6b5d2b930dc1d164e1624e63f0..9fea6c6bbf49e3768e81ad3ffb8b8af2bbdef61f 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -48,7 +48,7 @@ #include "uasm.c" -static const struct insn const insn_table[insn_invalid] = { +static const struct insn insn_table[insn_invalid] = { [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD}, [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD}, diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c new file mode 100644 index 0000000000000000000000000000000000000000..3f87b96da5c4cba03aa2cff0f506f72626d99dbf --- /dev/null +++ b/arch/mips/net/ebpf_jit.c @@ -0,0 +1,1950 @@ +/* + * Just-In-Time compiler for eBPF filters on MIPS + * + * Copyright (c) 2017 Cavium, Inc. + * + * Based on code from: + * + * Copyright (c) 2014 Imagination Technologies Ltd. + * Author: Markos Chandras + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers used by JIT */ +#define MIPS_R_ZERO 0 +#define MIPS_R_AT 1 +#define MIPS_R_V0 2 /* BPF_R0 */ +#define MIPS_R_V1 3 +#define MIPS_R_A0 4 /* BPF_R1 */ +#define MIPS_R_A1 5 /* BPF_R2 */ +#define MIPS_R_A2 6 /* BPF_R3 */ +#define MIPS_R_A3 7 /* BPF_R4 */ +#define MIPS_R_A4 8 /* BPF_R5 */ +#define MIPS_R_T4 12 /* BPF_AX */ +#define MIPS_R_T5 13 +#define MIPS_R_T6 14 +#define MIPS_R_T7 15 +#define MIPS_R_S0 16 /* BPF_R6 */ +#define MIPS_R_S1 17 /* BPF_R7 */ +#define MIPS_R_S2 18 /* BPF_R8 */ +#define MIPS_R_S3 19 /* BPF_R9 */ +#define MIPS_R_S4 20 /* BPF_TCC */ +#define MIPS_R_S5 21 +#define MIPS_R_S6 22 +#define MIPS_R_S7 23 +#define MIPS_R_T8 24 +#define MIPS_R_T9 25 +#define MIPS_R_SP 29 +#define MIPS_R_RA 31 + +/* eBPF flags */ +#define EBPF_SAVE_S0 BIT(0) +#define EBPF_SAVE_S1 BIT(1) +#define EBPF_SAVE_S2 BIT(2) +#define EBPF_SAVE_S3 BIT(3) +#define EBPF_SAVE_S4 BIT(4) +#define EBPF_SAVE_RA BIT(5) +#define EBPF_SEEN_FP BIT(6) +#define EBPF_SEEN_TC BIT(7) +#define EBPF_TCC_IN_V1 BIT(8) + +/* + * For the mips64 ISA, we need to track the value range or type for + * each JIT register. The BPF machine requires zero extended 32-bit + * values, but the mips64 ISA requires sign extended 32-bit values. + * At each point in the BPF program we track the state of every + * register so that we can zero extend or sign extend as the BPF + * semantics require. + */ +enum reg_val_type { + /* uninitialized */ + REG_UNKNOWN, + /* not known to be 32-bit compatible. */ + REG_64BIT, + /* 32-bit compatible, no truncation needed for 64-bit ops. */ + REG_64BIT_32BIT, + /* 32-bit compatible, need truncation for 64-bit ops. 
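+	 * (e.g. bits 63..32 may hold a sign extension left there by a 32-bit
+	 * MIPS ALU op and must be cleared, typically with dinsu, before the
+	 * value feeds a 64-bit operation.)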
*/ + REG_32BIT, + /* 32-bit zero extended. */ + REG_32BIT_ZERO_EX, + /* 32-bit no sign/zero extension needed. */ + REG_32BIT_POS +}; + +/* + * high bit of offsets indicates if long branch conversion done at + * this insn. + */ +#define OFFSETS_B_CONV BIT(31) + +/** + * struct jit_ctx - JIT context + * @skf: The sk_filter + * @stack_size: eBPF stack size + * @tmp_offset: eBPF $sp offset to 8-byte temporary memory + * @idx: Instruction index + * @flags: JIT flags + * @offsets: Instruction offsets + * @target: Memory location for the compiled filter + * @reg_val_types Packed enum reg_val_type for each register. + */ +struct jit_ctx { + const struct bpf_prog *skf; + int stack_size; + int tmp_offset; + u32 idx; + u32 flags; + u32 *offsets; + u32 *target; + u64 *reg_val_types; + unsigned int long_b_conversion:1; + unsigned int gen_b_offsets:1; +}; + +static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) +{ + *rvt &= ~(7ull << (reg * 3)); + *rvt |= ((u64)type << (reg * 3)); +} + +static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, + int index, int reg) +{ + return (ctx->reg_val_types[index] >> (reg * 3)) & 7; +} + +/* Simply emit the instruction if the JIT memory space has been allocated */ +#define emit_instr(ctx, func, ...) \ +do { \ + if ((ctx)->target != NULL) { \ + u32 *p = &(ctx)->target[ctx->idx]; \ + uasm_i_##func(&p, ##__VA_ARGS__); \ + } \ + (ctx)->idx++; \ +} while (0) + +static unsigned int j_target(struct jit_ctx *ctx, int target_idx) +{ + unsigned long target_va, base_va; + unsigned int r; + + if (!ctx->target) + return 0; + + base_va = (unsigned long)ctx->target; + target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV); + + if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful)) + return (unsigned int)-1; + r = target_va & 0x0ffffffful; + return r; +} + +/* Compute the immediate value for PC-relative branches. */ +static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) +{ + if (!ctx->gen_b_offsets) + return 0; + + /* + * We want a pc-relative branch. tgt is the instruction offset + * we want to jump to. + + * Branch on MIPS: + * I: target_offset <- sign_extend(offset) + * I+1: PC += target_offset (delay slot) + * + * ctx->idx currently points to the branch instruction + * but the offset is added to the delay slot so we need + * to subtract 4. + */ + return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) - + (ctx->idx * 4) - 4; +} + +int bpf_jit_enable __read_mostly; + +enum which_ebpf_reg { + src_reg, + src_reg_no_fp, + dst_reg, + dst_reg_fp_ok +}; + +/* + * For eBPF, the register mapping naturally falls out of the + * requirements of eBPF and the MIPS n64 ABI. We don't maintain a + * separate frame pointer, so BPF_REG_10 relative accesses are + * adjusted to be $sp relative. + */ +int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn, + enum which_ebpf_reg w) +{ + int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ? 
+ insn->src_reg : insn->dst_reg; + + switch (ebpf_reg) { + case BPF_REG_0: + return MIPS_R_V0; + case BPF_REG_1: + return MIPS_R_A0; + case BPF_REG_2: + return MIPS_R_A1; + case BPF_REG_3: + return MIPS_R_A2; + case BPF_REG_4: + return MIPS_R_A3; + case BPF_REG_5: + return MIPS_R_A4; + case BPF_REG_6: + ctx->flags |= EBPF_SAVE_S0; + return MIPS_R_S0; + case BPF_REG_7: + ctx->flags |= EBPF_SAVE_S1; + return MIPS_R_S1; + case BPF_REG_8: + ctx->flags |= EBPF_SAVE_S2; + return MIPS_R_S2; + case BPF_REG_9: + ctx->flags |= EBPF_SAVE_S3; + return MIPS_R_S3; + case BPF_REG_10: + if (w == dst_reg || w == src_reg_no_fp) + goto bad_reg; + ctx->flags |= EBPF_SEEN_FP; + /* + * Needs special handling, return something that + * cannot be clobbered just in case. + */ + return MIPS_R_ZERO; + case BPF_REG_AX: + return MIPS_R_T4; + default: +bad_reg: + WARN(1, "Illegal bpf reg: %d\n", ebpf_reg); + return -EINVAL; + } +} +/* + * eBPF stack frame will be something like: + * + * Entry $sp ------> +--------------------------------+ + * | $ra (optional) | + * +--------------------------------+ + * | $s0 (optional) | + * +--------------------------------+ + * | $s1 (optional) | + * +--------------------------------+ + * | $s2 (optional) | + * +--------------------------------+ + * | $s3 (optional) | + * +--------------------------------+ + * | $s4 (optional) | + * +--------------------------------+ + * | tmp-storage (if $ra saved) | + * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10 + * | BPF_REG_10 relative storage | + * | MAX_BPF_STACK (optional) | + * | . | + * | . | + * | . | + * $sp --------> +--------------------------------+ + * + * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized + * area is not allocated. + */ +static int gen_int_prologue(struct jit_ctx *ctx) +{ + int stack_adjust = 0; + int store_offset; + int locals_size; + + if (ctx->flags & EBPF_SAVE_RA) + /* + * If RA we are doing a function call and may need + * extra 8-byte tmp area. + */ + stack_adjust += 16; + if (ctx->flags & EBPF_SAVE_S0) + stack_adjust += 8; + if (ctx->flags & EBPF_SAVE_S1) + stack_adjust += 8; + if (ctx->flags & EBPF_SAVE_S2) + stack_adjust += 8; + if (ctx->flags & EBPF_SAVE_S3) + stack_adjust += 8; + if (ctx->flags & EBPF_SAVE_S4) + stack_adjust += 8; + + BUILD_BUG_ON(MAX_BPF_STACK & 7); + locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; + + stack_adjust += locals_size; + ctx->tmp_offset = locals_size; + + ctx->stack_size = stack_adjust; + + /* + * First instruction initializes the tail call count (TCC). + * On tail call we skip this instruction, and the TCC is + * passed in $v1 from the caller. 
+ */ + emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); + if (stack_adjust) + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); + else + return 0; + + store_offset = stack_adjust - 8; + + if (ctx->flags & EBPF_SAVE_RA) { + emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S0) { + emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S1) { + emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S2) { + emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S3) { + emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S4) { + emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); + store_offset -= 8; + } + + if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) + emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); + + return 0; +} + +static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) +{ + const struct bpf_prog *prog = ctx->skf; + int stack_adjust = ctx->stack_size; + int store_offset = stack_adjust - 8; + int r0 = MIPS_R_V0; + + if (dest_reg == MIPS_R_RA && + get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + /* Don't let zero extended value escape. */ + emit_instr(ctx, sll, r0, r0, 0); + + if (ctx->flags & EBPF_SAVE_RA) { + emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S0) { + emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S1) { + emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S2) { + emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S3) { + emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); + store_offset -= 8; + } + if (ctx->flags & EBPF_SAVE_S4) { + emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); + store_offset -= 8; + } + emit_instr(ctx, jr, dest_reg); + + if (stack_adjust) + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); + else + emit_instr(ctx, nop); + + return 0; +} + +static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, + struct jit_ctx *ctx) +{ + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { + emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm); + } else { + int lower = (s16)(insn->imm & 0xffff); + int upper = insn->imm - lower; + + emit_instr(ctx, lui, reg, upper >> 16); + emit_instr(ctx, addiu, reg, reg, lower); + } + +} + +static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, + int idx) +{ + int upper_bound, lower_bound; + int dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + + if (dst < 0) + return dst; + + switch (BPF_OP(insn->code)) { + case BPF_MOV: + case BPF_ADD: + upper_bound = S16_MAX; + lower_bound = S16_MIN; + break; + case BPF_SUB: + upper_bound = -(int)S16_MIN; + lower_bound = -(int)S16_MAX; + break; + case BPF_AND: + case BPF_OR: + case BPF_XOR: + upper_bound = 0xffff; + lower_bound = 0; + break; + case BPF_RSH: + case BPF_LSH: + case BPF_ARSH: + /* Shift amounts are truncated, no need for bounds */ + upper_bound = S32_MAX; + lower_bound = S32_MIN; + break; + default: + return -EINVAL; + } + + /* + * Immediate move clobbers the register, so no sign/zero + * extension needed. 
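+	 * (That is why the dinsu / sll canonicalisation below is skipped for
+	 * the BPF_MOV cases.)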
+ */ + if (BPF_CLASS(insn->code) == BPF_ALU64 && + BPF_OP(insn->code) != BPF_MOV && + get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + /* BPF_ALU | BPF_LSH doesn't need separate sign extension */ + if (BPF_CLASS(insn->code) == BPF_ALU && + BPF_OP(insn->code) != BPF_LSH && + BPF_OP(insn->code) != BPF_MOV && + get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT) + emit_instr(ctx, sll, dst, dst, 0); + + if (insn->imm >= lower_bound && insn->imm <= upper_bound) { + /* single insn immediate case */ + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { + case BPF_ALU64 | BPF_MOV: + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm); + break; + case BPF_ALU64 | BPF_AND: + case BPF_ALU | BPF_AND: + emit_instr(ctx, andi, dst, dst, insn->imm); + break; + case BPF_ALU64 | BPF_OR: + case BPF_ALU | BPF_OR: + emit_instr(ctx, ori, dst, dst, insn->imm); + break; + case BPF_ALU64 | BPF_XOR: + case BPF_ALU | BPF_XOR: + emit_instr(ctx, xori, dst, dst, insn->imm); + break; + case BPF_ALU64 | BPF_ADD: + emit_instr(ctx, daddiu, dst, dst, insn->imm); + break; + case BPF_ALU64 | BPF_SUB: + emit_instr(ctx, daddiu, dst, dst, -insn->imm); + break; + case BPF_ALU64 | BPF_RSH: + emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f); + break; + case BPF_ALU | BPF_RSH: + emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f); + break; + case BPF_ALU64 | BPF_LSH: + emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f); + break; + case BPF_ALU | BPF_LSH: + emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f); + break; + case BPF_ALU64 | BPF_ARSH: + emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f); + break; + case BPF_ALU | BPF_ARSH: + emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f); + break; + case BPF_ALU | BPF_MOV: + emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm); + break; + case BPF_ALU | BPF_ADD: + emit_instr(ctx, addiu, dst, dst, insn->imm); + break; + case BPF_ALU | BPF_SUB: + emit_instr(ctx, addiu, dst, dst, -insn->imm); + break; + default: + return -EINVAL; + } + } else { + /* multi insn immediate case */ + if (BPF_OP(insn->code) == BPF_MOV) { + gen_imm_to_reg(insn, dst, ctx); + } else { + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { + case BPF_ALU64 | BPF_AND: + case BPF_ALU | BPF_AND: + emit_instr(ctx, and, dst, dst, MIPS_R_AT); + break; + case BPF_ALU64 | BPF_OR: + case BPF_ALU | BPF_OR: + emit_instr(ctx, or, dst, dst, MIPS_R_AT); + break; + case BPF_ALU64 | BPF_XOR: + case BPF_ALU | BPF_XOR: + emit_instr(ctx, xor, dst, dst, MIPS_R_AT); + break; + case BPF_ALU64 | BPF_ADD: + emit_instr(ctx, daddu, dst, dst, MIPS_R_AT); + break; + case BPF_ALU64 | BPF_SUB: + emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT); + break; + case BPF_ALU | BPF_ADD: + emit_instr(ctx, addu, dst, dst, MIPS_R_AT); + break; + case BPF_ALU | BPF_SUB: + emit_instr(ctx, subu, dst, dst, MIPS_R_AT); + break; + default: + return -EINVAL; + } + } + } + + return 0; +} + +static void * __must_check +ool_skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + return skb_header_pointer(skb, offset, len, buffer); +} + +static int size_to_len(const struct bpf_insn *insn) +{ + switch (BPF_SIZE(insn->code)) { + case BPF_B: + return 1; + case BPF_H: + return 2; + case BPF_W: + return 4; + case BPF_DW: + return 8; + } + return 0; +} + +static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) +{ + if (value >= 0xffffffffffff8000ull || value < 0x8000ull) { + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, 
(int)value); + } else if (value >= 0xffffffff80000000ull || + (value < 0x80000000 && value > 0xffff)) { + emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16)); + emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff)); + } else { + int i; + bool seen_part = false; + int needed_shift = 0; + + for (i = 0; i < 4; i++) { + u64 part = (value >> (16 * (3 - i))) & 0xffff; + + if (seen_part && needed_shift > 0 && (part || i == 3)) { + emit_instr(ctx, dsll_safe, dst, dst, needed_shift); + needed_shift = 0; + } + if (part) { + if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) { + emit_instr(ctx, lui, dst, (s32)(s16)part); + needed_shift = -16; + } else { + emit_instr(ctx, ori, dst, + seen_part ? dst : MIPS_R_ZERO, + (unsigned int)part); + } + seen_part = true; + } + if (seen_part) + needed_shift += 16; + } + } +} + +static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) +{ + int off, b_off; + + ctx->flags |= EBPF_SEEN_TC; + /* + * if (index >= array->map.max_entries) + * goto out; + */ + off = offsetof(struct bpf_array, map.max_entries); + emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1); + emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2); + b_off = b_imm(this_idx + 1, ctx); + emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); + /* + * if (--TCC < 0) + * goto out; + */ + /* Delay slot */ + emit_instr(ctx, daddiu, MIPS_R_T5, + (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); + b_off = b_imm(this_idx + 1, ctx); + emit_instr(ctx, bltz, MIPS_R_T5, b_off); + /* + * prog = array->ptrs[index]; + * if (prog == NULL) + * goto out; + */ + /* Delay slot */ + emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3); + emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1); + off = offsetof(struct bpf_array, ptrs); + emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8); + b_off = b_imm(this_idx + 1, ctx); + emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off); + /* Delay slot */ + emit_instr(ctx, nop); + + /* goto *(prog->bpf_func + 4); */ + off = offsetof(struct bpf_prog, bpf_func); + emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT); + /* All systems are go... propagate TCC */ + emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO); + /* Skip first instruction (TCC initialization) */ + emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4); + return build_int_epilogue(ctx, MIPS_R_T9); +} + +static bool use_bbit_insns(void) +{ + switch (current_cpu_type()) { + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_CAVIUM_OCTEON3: + return true; + default: + return false; + } +} + +static bool is_bad_offset(int b_off) +{ + return b_off > 0x1ffff || b_off < -0x20000; +} + +/* Returns the number of insn slots consumed. 
*/ +static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, + int this_idx, int exit_idx) +{ + int src, dst, r, td, ts, mem_off, b_off; + bool need_swap, did_move, cmp_eq; + unsigned int target; + u64 t64; + s64 t64s; + + switch (insn->code) { + case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */ + case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */ + case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */ + case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */ + case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */ + case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */ + r = gen_imm_insn(insn, ctx, this_idx); + if (r < 0) + return r; + break; + case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + if (insn->imm == 1) /* Mult by 1 is a nop */ + break; + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + emit_instr(ctx, dmultu, MIPS_R_AT, dst); + emit_instr(ctx, mflo, dst); + break; + case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst); + break; + case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + /* sign extend */ + emit_instr(ctx, sll, dst, dst, 0); + } + if (insn->imm == 1) /* Mult by 1 is a nop */ + break; + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + emit_instr(ctx, multu, dst, MIPS_R_AT); + emit_instr(ctx, mflo, dst); + break; + case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + /* sign extend */ + emit_instr(ctx, sll, dst, dst, 0); + } + emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst); + break; + case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */ + case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + if (insn->imm == 0) { /* Div by zero */ + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); + } + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + /* sign extend */ + emit_instr(ctx, sll, dst, dst, 0); + if (insn->imm == 1) { + /* div by 1 is a nop, mod by 1 is zero */ + if (BPF_OP(insn->code) == BPF_MOD) + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); + break; + } + 
gen_imm_to_reg(insn, MIPS_R_AT, ctx); + emit_instr(ctx, divu, dst, MIPS_R_AT); + if (BPF_OP(insn->code) == BPF_DIV) + emit_instr(ctx, mflo, dst); + else + emit_instr(ctx, mfhi, dst); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */ + case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + if (insn->imm == 0) { /* Div by zero */ + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); + } + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + + if (insn->imm == 1) { + /* div by 1 is a nop, mod by 1 is zero */ + if (BPF_OP(insn->code) == BPF_MOD) + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); + break; + } + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + emit_instr(ctx, ddivu, dst, MIPS_R_AT); + if (BPF_OP(insn->code) == BPF_DIV) + emit_instr(ctx, mflo, dst); + else + emit_instr(ctx, mfhi, dst); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */ + case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */ + src = ebpf_to_mips_reg(ctx, insn, src_reg); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (src < 0 || dst < 0) + return -EINVAL; + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + did_move = false; + if (insn->src_reg == BPF_REG_10) { + if (BPF_OP(insn->code) == BPF_MOV) { + emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); + did_move = true; + } else { + emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK); + src = MIPS_R_AT; + } + } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { + int tmp_reg = MIPS_R_AT; + + if (BPF_OP(insn->code) == BPF_MOV) { + tmp_reg = dst; + did_move = true; + } + emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO); + emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); + src = MIPS_R_AT; + } + switch (BPF_OP(insn->code)) { + case BPF_MOV: + if (!did_move) + emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); + break; + case BPF_ADD: + emit_instr(ctx, daddu, dst, dst, src); + break; + case BPF_SUB: + emit_instr(ctx, dsubu, dst, dst, src); + break; + case BPF_XOR: + emit_instr(ctx, xor, dst, dst, src); + break; + case BPF_OR: + emit_instr(ctx, or, dst, dst, src); + break; + case BPF_AND: + emit_instr(ctx, and, dst, dst, src); + break; + case BPF_MUL: + emit_instr(ctx, dmultu, dst, src); + emit_instr(ctx, mflo, dst); + break; + case BPF_DIV: + case BPF_MOD: + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); + emit_instr(ctx, ddivu, dst, src); + if (BPF_OP(insn->code) == BPF_DIV) + emit_instr(ctx, mflo, dst); + else + emit_instr(ctx, mfhi, dst); + break; + case BPF_LSH: + emit_instr(ctx, dsllv, dst, dst, src); + break; + case BPF_RSH: + 
emit_instr(ctx, dsrlv, dst, dst, src); + break; + case BPF_ARSH: + emit_instr(ctx, dsrav, dst, dst, src); + break; + default: + pr_err("ALU64_REG NOT HANDLED\n"); + return -EINVAL; + } + break; + case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ + case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (src < 0 || dst < 0) + return -EINVAL; + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + /* sign extend */ + emit_instr(ctx, sll, dst, dst, 0); + } + did_move = false; + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); + if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { + int tmp_reg = MIPS_R_AT; + + if (BPF_OP(insn->code) == BPF_MOV) { + tmp_reg = dst; + did_move = true; + } + /* sign extend */ + emit_instr(ctx, sll, tmp_reg, src, 0); + src = MIPS_R_AT; + } + switch (BPF_OP(insn->code)) { + case BPF_MOV: + if (!did_move) + emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); + break; + case BPF_ADD: + emit_instr(ctx, addu, dst, dst, src); + break; + case BPF_SUB: + emit_instr(ctx, subu, dst, dst, src); + break; + case BPF_XOR: + emit_instr(ctx, xor, dst, dst, src); + break; + case BPF_OR: + emit_instr(ctx, or, dst, dst, src); + break; + case BPF_AND: + emit_instr(ctx, and, dst, dst, src); + break; + case BPF_MUL: + emit_instr(ctx, mul, dst, dst, src); + break; + case BPF_DIV: + case BPF_MOD: + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); + emit_instr(ctx, divu, dst, src); + if (BPF_OP(insn->code) == BPF_DIV) + emit_instr(ctx, mflo, dst); + else + emit_instr(ctx, mfhi, dst); + break; + case BPF_LSH: + emit_instr(ctx, sllv, dst, dst, src); + break; + case BPF_RSH: + emit_instr(ctx, srlv, dst, dst, src); + break; + default: + pr_err("ALU_REG NOT HANDLED\n"); + return -EINVAL; + } + break; + case BPF_JMP | BPF_EXIT: + if (this_idx + 1 < exit_idx) { + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); + emit_instr(ctx, nop); + } + break; + case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ + case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ + cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); + if (dst < 0) + return dst; + if (insn->imm == 0) { + src = MIPS_R_ZERO; + } else { + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + src = MIPS_R_AT; + } + goto jeq_common; + case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (src < 0 || dst < 0) + return -EINVAL; + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); + if (td == REG_32BIT && ts != 
REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ } else if (ts == REG_32BIT && td != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+ dst = MIPS_R_AT;
+ }
+ if (BPF_OP(insn->code) == BPF_JSET) {
+ emit_instr(ctx, and, MIPS_R_AT, dst, src);
+ cmp_eq = false;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JSGT) {
+ emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ break;
+ } else if (BPF_OP(insn->code) == BPF_JSGE) {
+ emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JGT) {
+ /* dst or src could be AT */
+ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ /* SP known to be non-zero, movz becomes boolean not */
+ emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+ emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (BPF_OP(insn->code) == BPF_JGE) {
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ cmp_eq = true;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else { /* JNE/JEQ case */
+ cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
+ }
+jeq_common:
+ /*
+ * If the next insn is EXIT and we are jumping around
+ * only it, invert the sense of the compare and
+ * conditionally jump to the exit. Poor man's branch
+ * chaining.
+ */
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, bne, dst, src, b_off);
+ else
+ emit_instr(ctx, beq, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ return 2; /* We consumed the exit.
*/ + } + b_off = b_imm(this_idx + insn->off + 1, ctx); + if (is_bad_offset(b_off)) { + target = j_target(ctx, this_idx + insn->off + 1); + if (target == (unsigned int)-1) + return -E2BIG; + cmp_eq = !cmp_eq; + b_off = 4 * 3; + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { + ctx->offsets[this_idx] |= OFFSETS_B_CONV; + ctx->long_b_conversion = 1; + } + } + + if (cmp_eq) + emit_instr(ctx, beq, dst, src, b_off); + else + emit_instr(ctx, bne, dst, src, b_off); + emit_instr(ctx, nop); + if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { + emit_instr(ctx, j, target); + emit_instr(ctx, nop); + } + break; + case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ + case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ + cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); + if (dst < 0) + return dst; + + if (insn->imm == 0) { + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + if (cmp_eq) + emit_instr(ctx, bltz, dst, b_off); + else + emit_instr(ctx, blez, dst, b_off); + emit_instr(ctx, nop); + return 2; /* We consumed the exit. */ + } + b_off = b_imm(this_idx + insn->off + 1, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + if (cmp_eq) + emit_instr(ctx, bgez, dst, b_off); + else + emit_instr(ctx, bgtz, dst, b_off); + emit_instr(ctx, nop); + break; + } + /* + * only "LT" compare available, so we must use imm + 1 + * to generate "GT" + */ + t64s = insn->imm + (cmp_eq ? 0 : 1); + if (t64s >= S16_MIN && t64s <= S16_MAX) { + emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); + src = MIPS_R_AT; + dst = MIPS_R_ZERO; + cmp_eq = true; + goto jeq_common; + } + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); + emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); + src = MIPS_R_AT; + dst = MIPS_R_ZERO; + cmp_eq = true; + goto jeq_common; + + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + cmp_eq = (BPF_OP(insn->code) == BPF_JGE); + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); + if (dst < 0) + return dst; + /* + * only "LT" compare available, so we must use imm + 1 + * to generate "GT" + */ + t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); + if (t64s >= 0 && t64s <= S16_MAX) { + emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); + src = MIPS_R_AT; + dst = MIPS_R_ZERO; + cmp_eq = true; + goto jeq_common; + } + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); + emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); + src = MIPS_R_AT; + dst = MIPS_R_ZERO; + cmp_eq = true; + goto jeq_common; + + case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); + if (dst < 0) + return dst; + + if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off); + emit_instr(ctx, nop); + return 2; /* We consumed the exit. */ + } + b_off = b_imm(this_idx + insn->off + 1, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off); + emit_instr(ctx, nop); + break; + } + t64 = (u32)insn->imm; + emit_const_to_reg(ctx, MIPS_R_AT, t64); + emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT); + src = MIPS_R_AT; + dst = MIPS_R_ZERO; + cmp_eq = false; + goto jeq_common; + + case BPF_JMP | BPF_JA: + /* + * Prefer relative branch for easier debugging, but + * fall back if needed. 
+ */ + b_off = b_imm(this_idx + insn->off + 1, ctx); + if (is_bad_offset(b_off)) { + target = j_target(ctx, this_idx + insn->off + 1); + if (target == (unsigned int)-1) + return -E2BIG; + emit_instr(ctx, j, target); + } else { + emit_instr(ctx, b, b_off); + } + emit_instr(ctx, nop); + break; + case BPF_LD | BPF_DW | BPF_IMM: + if (insn->src_reg != 0) + return -EINVAL; + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32); + emit_const_to_reg(ctx, dst, t64); + return 2; /* Double slot insn */ + + case BPF_JMP | BPF_CALL: + ctx->flags |= EBPF_SAVE_RA; + t64s = (s64)insn->imm + (s64)__bpf_call_base; + emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + /* delay slot */ + emit_instr(ctx, nop); + break; + + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx, this_idx)) + return -EINVAL; + break; + + case BPF_LD | BPF_B | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_DW | BPF_ABS: + ctx->flags |= EBPF_SAVE_RA; + + gen_imm_to_reg(insn, MIPS_R_A1, ctx); + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); + + if (insn->imm < 0) { + emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper); + } else { + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); + } + goto ld_skb_common; + + case BPF_LD | BPF_B | BPF_IND: + case BPF_LD | BPF_H | BPF_IND: + case BPF_LD | BPF_W | BPF_IND: + case BPF_LD | BPF_DW | BPF_IND: + ctx->flags |= EBPF_SAVE_RA; + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); + if (src < 0) + return src; + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); + if (ts == REG_32BIT_ZERO_EX) { + /* sign extend */ + emit_instr(ctx, sll, MIPS_R_A1, src, 0); + src = MIPS_R_A1; + } + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { + emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm); + } else { + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src); + } + /* truncate to 32-bit int */ + emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0); + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); + emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO); + + emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper); + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); + emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT); + +ld_skb_common: + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + /* delay slot move */ + emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO); + + /* Check the error value */ + b_off = b_imm(exit_idx, ctx); + if (is_bad_offset(b_off)) { + target = j_target(ctx, exit_idx); + if (target == (unsigned int)-1) + return -E2BIG; + + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { + ctx->offsets[this_idx] |= OFFSETS_B_CONV; + ctx->long_b_conversion = 1; + } + emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3); + emit_instr(ctx, nop); + emit_instr(ctx, j, target); + emit_instr(ctx, nop); + } else { + emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off); + emit_instr(ctx, nop); + } + +#ifdef __BIG_ENDIAN + need_swap = false; +#else + need_swap = true; +#endif + dst = MIPS_R_V0; + switch (BPF_SIZE(insn->code)) { + case BPF_B: + emit_instr(ctx, lbu, dst, 0, MIPS_R_V0); + break; + case BPF_H: + emit_instr(ctx, lhu, dst, 0, MIPS_R_V0); + if 
(need_swap) + emit_instr(ctx, wsbh, dst, dst); + break; + case BPF_W: + emit_instr(ctx, lw, dst, 0, MIPS_R_V0); + if (need_swap) { + emit_instr(ctx, wsbh, dst, dst); + emit_instr(ctx, rotr, dst, dst, 16); + } + break; + case BPF_DW: + emit_instr(ctx, ld, dst, 0, MIPS_R_V0); + if (need_swap) { + emit_instr(ctx, dsbh, dst, dst); + emit_instr(ctx, dshd, dst, dst); + } + break; + } + + break; + case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU | BPF_END | BPF_FROM_LE: + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); + if (insn->imm == 64 && td == REG_32BIT) + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + + if (insn->imm != 64 && + (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { + /* sign extend */ + emit_instr(ctx, sll, dst, dst, 0); + } + +#ifdef __BIG_ENDIAN + need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE); +#else + need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE); +#endif + if (insn->imm == 16) { + if (need_swap) + emit_instr(ctx, wsbh, dst, dst); + emit_instr(ctx, andi, dst, dst, 0xffff); + } else if (insn->imm == 32) { + if (need_swap) { + emit_instr(ctx, wsbh, dst, dst); + emit_instr(ctx, rotr, dst, dst, 16); + } + } else { /* 64-bit*/ + if (need_swap) { + emit_instr(ctx, dsbh, dst, dst); + emit_instr(ctx, dshd, dst, dst); + } + } + break; + + case BPF_ST | BPF_B | BPF_MEM: + case BPF_ST | BPF_H | BPF_MEM: + case BPF_ST | BPF_W | BPF_MEM: + case BPF_ST | BPF_DW | BPF_MEM: + if (insn->dst_reg == BPF_REG_10) { + ctx->flags |= EBPF_SEEN_FP; + dst = MIPS_R_SP; + mem_off = insn->off + MAX_BPF_STACK; + } else { + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + mem_off = insn->off; + } + gen_imm_to_reg(insn, MIPS_R_AT, ctx); + switch (BPF_SIZE(insn->code)) { + case BPF_B: + emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst); + break; + case BPF_H: + emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); + break; + case BPF_W: + emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst); + break; + case BPF_DW: + emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst); + break; + } + break; + + case BPF_LDX | BPF_B | BPF_MEM: + case BPF_LDX | BPF_H | BPF_MEM: + case BPF_LDX | BPF_W | BPF_MEM: + case BPF_LDX | BPF_DW | BPF_MEM: + if (insn->src_reg == BPF_REG_10) { + ctx->flags |= EBPF_SEEN_FP; + src = MIPS_R_SP; + mem_off = insn->off + MAX_BPF_STACK; + } else { + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); + if (src < 0) + return src; + mem_off = insn->off; + } + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + switch (BPF_SIZE(insn->code)) { + case BPF_B: + emit_instr(ctx, lbu, dst, mem_off, src); + break; + case BPF_H: + emit_instr(ctx, lhu, dst, mem_off, src); + break; + case BPF_W: + emit_instr(ctx, lw, dst, mem_off, src); + break; + case BPF_DW: + emit_instr(ctx, ld, dst, mem_off, src); + break; + } + break; + + case BPF_STX | BPF_B | BPF_MEM: + case BPF_STX | BPF_H | BPF_MEM: + case BPF_STX | BPF_W | BPF_MEM: + case BPF_STX | BPF_DW | BPF_MEM: + case BPF_STX | BPF_W | BPF_XADD: + case BPF_STX | BPF_DW | BPF_XADD: + if (insn->dst_reg == BPF_REG_10) { + ctx->flags |= EBPF_SEEN_FP; + dst = MIPS_R_SP; + mem_off = insn->off + MAX_BPF_STACK; + } else { + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); + if (dst < 0) + return dst; + mem_off = insn->off; + } + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); + if (src < 0) + return dst; + if (BPF_MODE(insn->code) == BPF_XADD) { + switch (BPF_SIZE(insn->code)) { + case BPF_W: + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+ /*
+ * On failure back up to LL (-4
+ * instructions of 4 bytes each)
+ */
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ }
+ } else { /* BPF_MEM */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, src, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, src, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, src, mem_off, dst);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, sd, src, mem_off, dst);
+ break;
+ }
+ }
+ break;
+
+ default:
+ pr_err("NOT HANDLED %d - (%02x)\n",
+ this_idx, (unsigned int)insn->code);
+ return -EINVAL;
+ }
+ return 1;
+}
+
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ int i, r;
+
+ for (i = 0; i < prog->len; ) {
+ insn = prog->insnsi + i;
+ if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+ /* dead instruction, don't emit it. */
+ i++;
+ continue;
+ }
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+ r = build_one_insn(insn, ctx, i, prog->len);
+ if (r < 0)
+ return r;
+ i += r;
+ }
+ /* epilogue offset */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ /*
+ * All exits have an offset of the epilogue; some offsets may
+ * not have been set due to branch-around threading, so set
+ * them now.
+ */ + if (ctx->target == NULL) + for (i = 0; i < prog->len; i++) { + insn = prog->insnsi + i; + if (insn->code == (BPF_JMP | BPF_EXIT)) + ctx->offsets[i] = ctx->idx * 4; + } + return 0; +} + +/* return the last idx processed, or negative for error */ +static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, + int start_idx, bool follow_taken) +{ + const struct bpf_prog *prog = ctx->skf; + const struct bpf_insn *insn; + u64 exit_rvt = initial_rvt; + u64 *rvt = ctx->reg_val_types; + int idx; + int reg; + + for (idx = start_idx; idx < prog->len; idx++) { + rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt; + insn = prog->insnsi + idx; + switch (BPF_CLASS(insn->code)) { + case BPF_ALU: + switch (BPF_OP(insn->code)) { + case BPF_ADD: + case BPF_SUB: + case BPF_MUL: + case BPF_DIV: + case BPF_OR: + case BPF_AND: + case BPF_LSH: + case BPF_RSH: + case BPF_NEG: + case BPF_MOD: + case BPF_XOR: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + break; + case BPF_MOV: + if (BPF_SRC(insn->code)) { + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + } else { + /* IMM to REG move*/ + if (insn->imm >= 0) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + else + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + } + break; + case BPF_END: + if (insn->imm == 64) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + else if (insn->imm == 32) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + else /* insn->imm == 16 */ + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + break; + } + rvt[idx] |= RVT_DONE; + break; + case BPF_ALU64: + switch (BPF_OP(insn->code)) { + case BPF_MOV: + if (BPF_SRC(insn->code)) { + /* REG to REG move*/ + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + } else { + /* IMM to REG move*/ + if (insn->imm >= 0) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + else + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); + } + break; + default: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + } + rvt[idx] |= RVT_DONE; + break; + case BPF_LD: + switch (BPF_SIZE(insn->code)) { + case BPF_DW: + if (BPF_MODE(insn->code) == BPF_IMM) { + s64 val; + + val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32)); + if (val > 0 && val <= S32_MAX) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + else if (val >= S32_MIN && val <= S32_MAX) + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); + else + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + rvt[idx] |= RVT_DONE; + idx++; + } else { + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + } + break; + case BPF_B: + case BPF_H: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + break; + case BPF_W: + if (BPF_MODE(insn->code) == BPF_IMM) + set_reg_val_type(&exit_rvt, insn->dst_reg, + insn->imm >= 0 ? 
REG_32BIT_POS : REG_32BIT); + else + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + break; + } + rvt[idx] |= RVT_DONE; + break; + case BPF_LDX: + switch (BPF_SIZE(insn->code)) { + case BPF_DW: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); + break; + case BPF_B: + case BPF_H: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); + break; + case BPF_W: + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); + break; + } + rvt[idx] |= RVT_DONE; + break; + case BPF_JMP: + switch (BPF_OP(insn->code)) { + case BPF_EXIT: + rvt[idx] = RVT_DONE | exit_rvt; + rvt[prog->len] = exit_rvt; + return idx; + case BPF_JA: + rvt[idx] |= RVT_DONE; + idx += insn->off; + break; + case BPF_JEQ: + case BPF_JGT: + case BPF_JGE: + case BPF_JSET: + case BPF_JNE: + case BPF_JSGT: + case BPF_JSGE: + if (follow_taken) { + rvt[idx] |= RVT_BRANCH_TAKEN; + idx += insn->off; + follow_taken = false; + } else { + rvt[idx] |= RVT_FALL_THROUGH; + } + break; + case BPF_CALL: + set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT); + /* Upon call return, argument registers are clobbered. */ + for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++) + set_reg_val_type(&exit_rvt, reg, REG_64BIT); + + rvt[idx] |= RVT_DONE; + break; + default: + WARN(1, "Unhandled BPF_JMP case.\n"); + rvt[idx] |= RVT_DONE; + break; + } + break; + default: + rvt[idx] |= RVT_DONE; + break; + } + } + return idx; +} + +/* + * Track the value range (i.e. 32-bit vs. 64-bit) of each register at + * each eBPF insn. This allows unneeded sign and zero extension + * operations to be omitted. + * + * Doesn't handle yet confluence of control paths with conflicting + * ranges, but it is good enough for most sane code. + */ +static int reg_val_propagate(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->skf; + u64 exit_rvt; + int reg; + int i; + + /* + * 11 registers * 3 bits/reg leaves top bits free for other + * uses. Bit-62..63 used to see if we have visited an insn. + */ + exit_rvt = 0; + + /* Upon entry, argument registers are 64-bit. */ + for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++) + set_reg_val_type(&exit_rvt, reg, REG_64BIT); + + /* + * First follow all conditional branches on the fall-through + * edge of control flow.. + */ + reg_val_propagate_range(ctx, exit_rvt, 0, false); +restart_search: + /* + * Then repeatedly find the first conditional branch where + * both edges of control flow have not been taken, and follow + * the branch taken edge. We will end up restarting the + * search once per conditional branch insn. + */ + for (i = 0; i < prog->len; i++) { + u64 rvt = ctx->reg_val_types[i]; + + if ((rvt & RVT_VISITED_MASK) == RVT_DONE || + (rvt & RVT_VISITED_MASK) == 0) + continue; + if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) { + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true); + } else { /* RVT_BRANCH_TAKEN */ + WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n"); + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false); + } + goto restart_search; + } + /* + * Eventually all conditional branches have been followed on + * both branches and we are done. Any insn that has not been + * visited at this point is dead. + */ + + return 0; +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + u32 *p; + + /* We are guaranteed to have aligned memory. 
*/
+ for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+ uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_prog *orig_prog = prog;
+ bool tmp_blinded = false;
+ struct bpf_prog *tmp;
+ struct bpf_binary_header *header = NULL;
+ struct jit_ctx ctx;
+ unsigned int image_size;
+ u8 *image_ptr;
+
+ if (!bpf_jit_enable || !cpu_has_mips64r2)
+ return prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ /* If blinding was requested and we failed during blinding,
+ * we must fall back to the interpreter.
+ */
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ goto out_err;
+
+ ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+ if (ctx.reg_val_types == NULL)
+ goto out_err;
+
+ ctx.skf = prog;
+
+ if (reg_val_propagate(&ctx))
+ goto out_err;
+
+ /*
+ * First pass discovers used resources and instruction offsets
+ * assuming short branches are used.
+ */
+ if (build_int_body(&ctx))
+ goto out_err;
+
+ /*
+ * If no calls are made (EBPF_SAVE_RA not set), the tail call
+ * count can stay in $v1, else we must save it in $s4.
+ */
+ if (ctx.flags & EBPF_SEEN_TC) {
+ if (ctx.flags & EBPF_SAVE_RA)
+ ctx.flags |= EBPF_SAVE_S4;
+ else
+ ctx.flags |= EBPF_TCC_IN_V1;
+ }
+
+ /*
+ * Second pass generates offsets; if any branches are out of
+ * range, a jump-around long sequence is generated, and we have
+ * to try again from the beginning to generate the new
+ * offsets. This is done until no additional conversions are
+ * necessary.
+ */
+ do {
+ ctx.idx = 0;
+ ctx.gen_b_offsets = 1;
+ ctx.long_b_conversion = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+ } while (ctx.long_b_conversion);
+
+ image_size = 4 * ctx.idx;
+
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out_err;
+
+ ctx.target = (u32 *)image_ptr;
+
+ /* Third pass generates the code */
+ ctx.idx = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+
+ /* Update the icache */
+ flush_icache_range((unsigned long)ctx.target,
+ (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+ bpf_jit_binary_lock_ro(header);
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+out_normal:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ kfree(ctx.offsets);
+ kfree(ctx.reg_val_types);
+
+ return prog;
+
+out_err:
+ prog = orig_prog;
+ if (header)
+ bpf_jit_binary_free(header);
+ goto out_normal;
+}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index bd67ac74fe2d3420509a03647a20856341bb70c8..9632436d74d7a74b3d584ab6e87a1fc7e55827cc 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 static int __init pcibios_set_cache_line_size(void)
 {
- struct cpuinfo_mips *c = &current_cpu_data;
 unsigned int lsize;
 /*
 * Set PCI cacheline size to that of the highest level in the
 * cache hierarchy.
 */
- lsize = c->dcache.linesz;
- lsize = c->scache.linesz ?
: lsize; - lsize = c->tcache.linesz ? : lsize; + lsize = cpu_dcache_line_size(); + lsize = cpu_scache_line_size() ? : lsize; + lsize = cpu_tcache_line_size() ? : lsize; BUG_ON(!lsize); diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 094a0ee4af46e913f2cb92f25a8503313dccc26c..9be8b08ae46b76a59984f47745138ceb16983e3a 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index 974276e828b2cdd3eecb07493f480a8779da2d41..e2690d7ca4ddd992e69fffe802cac6355ad14de8 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv, " syscall\n" : "=r" (ret), "=r" (error) : "r" (tv), "r" (tz), "r" (nr) - : "memory"); + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); return error ? -ret : ret; } @@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid, " syscall\n" : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) - : "memory"); + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); return error ? -ret : ret; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 531da9eb8f4363ad95c65e16b3d7c9653ac973b8..dda1f558ef35c5fa9113c763c846924ee24f93dc 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -47,6 +47,9 @@ config PARISC and later HP3000 series). The PA-RISC Linux project home page is at . +config CPU_BIG_ENDIAN + def_bool y + config MMU def_bool y diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig index 143d0265279204ef9e51288640ba091eb2453c27..ccc109761f445655218bd3cdc16a16f772b65d77 100644 --- a/arch/parisc/configs/712_defconfig +++ b/arch/parisc/configs/712_defconfig @@ -1,11 +1,9 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y @@ -14,7 +12,6 @@ CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_PA7100LC=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_GSC_LASI=y @@ -32,11 +29,9 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_INET_AH=m CONFIG_INET_ESP=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m # CONFIG_IPV6 is not set CONFIG_NETFILTER=y -CONFIG_IP_NF_QUEUE=m CONFIG_LLC2=m CONFIG_NET_PKTGEN=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -65,21 +60,20 @@ CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_TUN=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=m CONFIG_LASI_82596=y CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPP_MPPE=m CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_KEYBOARD_HIL_OLD is not set CONFIG_MOUSE_SERIAL=m +CONFIG_LEGACY_PTY_COUNT=64 CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=17 @@ -88,22 +82,17 @@ CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_SERIAL_MUX is not set CONFIG_PDC_CONSOLE=y -CONFIG_LEGACY_PTY_COUNT=64 CONFIG_PRINTER=m CONFIG_PPDEV=m # CONFIG_HW_RANDOM is not set CONFIG_RAW_DRIVER=y 
# CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_DUMMY_CONSOLE_COLUMNS=128 CONFIG_DUMMY_CONSOLE_ROWS=48 CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -111,13 +100,9 @@ CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SEQUENCER=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_HARMONY=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_AUTOFS4_FS=y @@ -130,14 +115,10 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_737=m @@ -177,21 +158,16 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m @@ -200,6 +176,7 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -CONFIG_LIBCRC32C=m +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig index 1a4f776b49b8915dca8cf9d1dbd09e6116b96204..5acb93dcaabfd84680f7c5cdde0a598fc4bf1b3e 100644 --- a/arch/parisc/configs/a500_defconfig +++ b/arch/parisc/configs/a500_defconfig @@ -1,13 +1,10 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y @@ -16,7 +13,6 @@ CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_PA8X00=y CONFIG_64BIT=y CONFIG_SMP=y @@ -43,21 +39,17 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_INET_AH=m CONFIG_INET_ESP=m -# CONFIG_INET_LRO is not set CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y # CONFIG_NETFILTER_XT_MATCH_DCCP is not set -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_RAW=m @@ -70,7 +62,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m @@ -94,7 +85,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_QLOGIC_1280=m @@ -106,43 +96,38 @@ 
CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y CONFIG_FUSION=y CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m CONFIG_FUSION_CTL=m CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_TUN=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m CONFIG_VORTEX=m CONFIG_TYPHOON=m +CONFIG_ACENIC=m +CONFIG_ACENIC_OMIT_TIGON_I=y +CONFIG_PCNET32=m +CONFIG_TIGON3=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=y CONFIG_TULIP_MMIO=y CONFIG_PCMCIA_XIRCOM=m CONFIG_HP100=m -CONFIG_NET_PCI=y -CONFIG_PCNET32=m CONFIG_E100=m -CONFIG_ACENIC=m -CONFIG_ACENIC_OMIT_TIGON_I=y CONFIG_E1000=m -CONFIG_TIGON3=m -CONFIG_NET_PCMCIA=y -CONFIG_PCMCIA_3C589=m -CONFIG_PCMCIA_3C574=m CONFIG_PCMCIA_SMC91C92=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_CS=m @@ -151,7 +136,6 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_PDC_CONSOLE=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set CONFIG_RAW_DRIVER=y # CONFIG_HWMON is not set @@ -160,7 +144,6 @@ CONFIG_AGP_PARISC=y # CONFIG_STI_CONSOLE is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set CONFIG_JFS_FS=m CONFIG_XFS_FS=m CONFIG_AUTOFS4_FS=y @@ -173,13 +156,9 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_UFS_FS=m CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFSD=m CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m @@ -187,17 +166,12 @@ CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y -CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_BLOWFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -CONFIG_LIBCRC32C=m diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig index f1a0c25bef8dc3a6667c608a092f96a39d5994aa..83ffd161aec55fc1f7063c7ed8bcb9f92bcca909 100644 --- a/arch/parisc/configs/b180_defconfig +++ b/arch/parisc/configs/b180_defconfig @@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_SLAB=y CONFIG_MODULES=y @@ -25,8 +24,6 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_LRO is not set -CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -53,10 +50,9 @@ CONFIG_MD_LINEAR=y CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_LASI_82596=y CONFIG_NET_TULIP=y CONFIG_TULIP=y +CONFIG_LASI_82596=y CONFIG_PPP=y CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_HIL_OLD is not set @@ -71,40 +67,31 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_PRINTER=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y 
CONFIG_SND_SEQUENCER=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_HARMONY=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set CONFIG_AUTOFS4_FS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y -CONFIG_SMB_FS=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y CONFIG_HEADERS_CHECK=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SECURITY=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig index 8e8f0e34f8174c5905ca8b3b3b65c829c3557265..0764d3971cf66e456a03e26f7542a11ddd98f86f 100644 --- a/arch/parisc/configs/c3000_defconfig +++ b/arch/parisc/configs/c3000_defconfig @@ -1,12 +1,9 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y @@ -15,7 +12,6 @@ CONFIG_OPROFILE=m CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_PA8X00=y CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_GSC is not set @@ -31,13 +27,11 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_PNP=y CONFIG_IP_PNP_BOOTP=y -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set CONFIG_INET6_IPCOMP=m CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y CONFIG_NETFILTER_DEBUG=y -CONFIG_IP_NF_QUEUE=m CONFIG_NET_PKTGEN=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y @@ -50,13 +44,11 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_NS87415=y -CONFIG_PATA_SIL680=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 @@ -76,28 +68,23 @@ CONFIG_FUSION=y CONFIG_FUSION_SPI=m CONFIG_FUSION_CTL=m CONFIG_NETDEVICES=y -CONFIG_DUMMY=m CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_TUN=m -CONFIG_NET_ETHERNET=y +CONFIG_ACENIC=m +CONFIG_TIGON3=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=y CONFIG_TULIP_MMIO=y -CONFIG_NET_PCI=y CONFIG_E100=m -CONFIG_ACENIC=m CONFIG_E1000=m -CONFIG_TIGON3=m CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200 +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set CONFIG_SERIO=m @@ -111,7 +98,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set CONFIG_RAW_DRIVER=y # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -121,9 +107,6 @@ CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SEQUENCER=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_AD1889=y CONFIG_USB_HIDDEV=y CONFIG_USB=y @@ -139,7 +122,6 @@ CONFIG_USB_MICROTEK=m CONFIG_USB_LEGOTOWER=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set CONFIG_XFS_FS=m CONFIG_AUTOFS4_FS=y CONFIG_ISO9660_FS=y @@ -149,7 +131,6 @@ 
CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y @@ -159,18 +140,13 @@ CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m -CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y -CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MUTEXES=y # CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_MD5=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_DES=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -CONFIG_LIBCRC32C=m diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig index f6a4c016304b657149d8c87f23e0c564e875a36b..088ab948a5caf8b8f7455233c1cba5295e00cc0f 100644 --- a/arch/parisc/configs/c8000_defconfig +++ b/arch/parisc/configs/c8000_defconfig @@ -1,16 +1,13 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_LZO=y CONFIG_EXPERT=y CONFIG_SYSCTL_SYSCALL=y CONFIG_SLAB=y @@ -23,7 +20,6 @@ CONFIG_PA8X00=y CONFIG_64BIT=y CONFIG_SMP=y CONFIG_PREEMPT=y -# CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_IOMMU_CCIO=y CONFIG_PCI=y CONFIG_PCI_LBA=y @@ -146,7 +142,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y # CONFIG_FB_STI is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set # CONFIG_BACKLIGHT_GENERIC is not set CONFIG_FRAMEBUFFER_CONSOLE=y @@ -157,12 +152,9 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=m CONFIG_SND=m +CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_AD1889=m # CONFIG_SND_USB is not set # CONFIG_SND_GSC is not set @@ -174,8 +166,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT4_FS=m CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_XFS_FS=m @@ -238,11 +228,8 @@ CONFIG_DEBUG_SLAB=y CONFIG_DEBUG_SLAB_LEAK=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y CONFIG_PANIC_ON_OOPS=y CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_PROVE_RCU_DELAY=y CONFIG_DEBUG_BLOCK_EXT_DEVT=y CONFIG_LATENCYTOP=y CONFIG_KEYS=y diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig index 310b6657e4ac9b38380eaecab1eccf12aeaad0d6..52c9050a7c5c6d5305923a5e3f357ab9f6515a78 100644 --- a/arch/parisc/configs/default_defconfig +++ b/arch/parisc/configs/default_defconfig @@ -1,11 +1,9 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y @@ -41,9 +39,7 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_INET_AH=m CONFIG_INET_ESP=m -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m -CONFIG_IPV6=y CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y @@ -82,26 +78,23 @@ CONFIG_MD_RAID1=y CONFIG_MD_RAID10=y CONFIG_BLK_DEV_DM=y CONFIG_NETDEVICES=y -CONFIG_DUMMY=m 
CONFIG_BONDING=m +CONFIG_DUMMY=m CONFIG_TUN=m -CONFIG_NET_ETHERNET=y -CONFIG_MII=m -CONFIG_LASI_82596=y -CONFIG_NET_TULIP=y -CONFIG_TULIP=y -CONFIG_NET_PCI=y CONFIG_ACENIC=y CONFIG_TIGON3=y -CONFIG_NET_PCMCIA=y +CONFIG_NET_TULIP=y +CONFIG_TULIP=y +CONFIG_LASI_82596=y CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m # CONFIG_KEYBOARD_HIL_OLD is not set CONFIG_MOUSE_SERIAL=y +CONFIG_LEGACY_PTY_COUNT=64 CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_CS=y @@ -109,31 +102,24 @@ CONFIG_SERIAL_8250_NR_UARTS=17 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_LEGACY_PTY_COUNT=64 CONFIG_PRINTER=m CONFIG_PPDEV=m # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_DUMMY_CONSOLE_COLUMNS=128 CONFIG_DUMMY_CONSOLE_ROWS=48 CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FONTS=y -CONFIG_FONT_8x16=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_SEQUENCER=y CONFIG_SND_AD1889=y CONFIG_SND_HARMONY=y CONFIG_HID_GYRATION=y @@ -141,7 +127,6 @@ CONFIG_HID_NTRIG=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_HID_TOPSEED=y CONFIG_USB=y @@ -150,21 +135,15 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set -CONFIG_AUTOFS_FS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V4=y -CONFIG_RPCSEC_GSS_SPKM3=m -CONFIG_SMB_FS=m -CONFIG_SMB_NLS_DEFAULT=y CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -204,30 +183,24 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y -CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_KEYS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set CONFIG_LIBCRC32C=m +CONFIG_FONTS=y diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 8688ba7f5966af8d7b626054a366d44c0448becd..37ae4b57c00151323e0561551a9aa1a3ff0d8d29 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -2,15 +2,11 @@ CONFIG_LOCALVERSION="-32bit" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_LZO=y CONFIG_EXPERT=y CONFIG_SYSCTL_SYSCALL=y CONFIG_PERF_EVENTS=y 
@@ -49,7 +45,6 @@ CONFIG_INET_ESP=m # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_LLC2=m # CONFIG_WIRELESS is not set @@ -149,10 +144,8 @@ CONFIG_PRINTER=m CONFIG_PPDEV=m # CONFIG_HW_RANDOM is not set CONFIG_I2C=y -CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set CONFIG_AGP=y -CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_FB=y CONFIG_FB_FOREIGN_ENDIAN=y CONFIG_FB_MODE_HELPERS=y @@ -169,11 +162,8 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_SEQUENCER=m CONFIG_SND_AD1889=m CONFIG_SND_HARMONY=m CONFIG_HIDRAW=y @@ -223,12 +213,7 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_RT=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -293,15 +278,12 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y -CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y -CONFIG_RCU_CPU_STALL_INFO=y CONFIG_LATENCYTOP=y CONFIG_LKDTM=m CONFIG_KEYS=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y @@ -320,7 +302,6 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_FONTS=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index c564e6e1fa23424c39efa967cce4a42ac9ae4f2f..d39e7f821aba318377b16bf2ef48b58315f5e252 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -8,10 +8,11 @@ CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CPUSETS=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -52,7 +53,6 @@ CONFIG_INET_ESP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_LRO=m CONFIG_INET_DIAG=m CONFIG_NETFILTER=y # CONFIG_NETFILTER_ADVANCED is not set @@ -84,7 +84,6 @@ CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y CONFIG_MD=y CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m CONFIG_BLK_DEV_DM=m CONFIG_DM_RAID=m CONFIG_DM_UEVENT=y @@ -138,21 +137,21 @@ CONFIG_QLGE=m # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MDIO_BITBANG=m CONFIG_PHYLIB=y -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m CONFIG_ICPLUS_PHY=m -CONFIG_REALTEK_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m CONFIG_STE10XP=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MDIO_BITBANG=m +CONFIG_VITESSE_PHY=m CONFIG_SLIP=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y @@ -166,10 +165,8 @@ 
CONFIG_INPUT_MISC=y CONFIG_SERIO_SERPORT=m # CONFIG_HP_SDC is not set CONFIG_SERIO_RAW=m -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y # CONFIG_LEGACY_PTYS is not set CONFIG_NOZOMI=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -207,10 +204,8 @@ CONFIG_AGP=y CONFIG_AGP_PARISC=y CONFIG_DRM=y CONFIG_DRM_RADEON=y -CONFIG_DRM_RADEON_UMS=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_BACKLIGHT_GENERIC is not set CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y @@ -246,8 +241,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_BTRFS_FS=m CONFIG_QUOTA=y @@ -286,27 +279,16 @@ CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y -CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y # CONFIG_SCHED_DEBUG is not set -CONFIG_TIMER_STATS=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_DEFLATE=m # CONFIG_CRYPTO_HW is not set CONFIG_CRC_CCITT=m CONFIG_LIBCRC32C=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index 32e105fb8adb362f261d20432f1f8af068086376..e3c0586260d8a34cd188764dae1e5452eefd7c22 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -150,7 +150,7 @@ #define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */ #define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */ #define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */ - /* Memory Address */ + /* Memory Address */ #define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */ #define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */ #define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */ @@ -228,6 +228,17 @@ struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */ unsigned long pdt_entries; }; +struct pdc_pat_mem_phys_mem_location { /* PDC_PAT_MEM/PDC_PAT_MEM_ADDRESS */ + u64 cabinet:8; + u64 ign1:8; + u64 ign2:8; + u64 cell_slot:8; + u64 ign3:8; + u64 dimm_slot:8; /* DIMM slot, e.g. 0x1A, 0x2B, show user hex value! 
*/ + u64 ign4:8; + u64 source:4; /* for mem: always 0x07 */ + u64 source_detail:4; /* for mem: always 0x04 (SIMM or DIMM) */ +}; struct pdc_pat_pd_addr_map_entry { unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */ @@ -319,6 +330,9 @@ extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, unsigned long *pdt_entries_ptr, unsigned long count, unsigned long offset); +extern int pdc_pat_mem_get_dimm_phys_location( + struct pdc_pat_mem_phys_mem_location *pret, + unsigned long phys_addr); #endif /* __ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 88fe0aad4390b10830ce1bc1be62925d4b2d4bbc..bc208136bbb26a837a978cdf77304f1c9dec0b2d 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -34,7 +34,7 @@ struct thread_info { /* thread information allocation */ -#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */ +#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ /* Be sure to hunt all references to this down when you change the size of * the kernel stack */ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c32a0909521665b5f08c22ef37fa8d8f9c654012..19c0c141bc3f9f0edd509708f978a2d7ca16c230 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, before it can be accessed through the kernel mapping. */ preempt_disable(); flush_dcache_page_asm(__pa(vfrom), vaddr); - preempt_enable(); copy_page_asm(vto, vfrom); + preempt_enable(); } EXPORT_SYMBOL(copy_user_page); @@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm) struct vm_area_struct *vma; pgd_t *pgd; + /* Flush the TLB to avoid speculation if coherency is required. */ + if (parisc_requires_coherency()) + flush_tlb_all(); + /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. */ if (mm_total_size(mm) >= parisc_cache_flush_threshold) { @@ -577,33 +581,21 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr; - pgd_t *pgd; - BUG_ON(!vma->vm_mm->context); - if ((end - start) >= parisc_cache_flush_threshold) { - flush_cache_all(); - return; - } + /* Flush the TLB to avoid speculation if coherency is required. 
*/ + if (parisc_requires_coherency()) + flush_tlb_range(vma, start, end); - if (vma->vm_mm->context == mfsp(3)) { - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); + if ((end - start) >= parisc_cache_flush_threshold + || vma->vm_mm->context != mfsp(3)) { + flush_cache_all(); return; } - pgd = vma->vm_mm->pgd; - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long pfn; - pte_t *ptep = get_ptep(pgd, addr); - if (!ptep) - continue; - pfn = pte_pfn(*ptep); - if (pfn_valid(pfn)) - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } + flush_user_dcache_range_asm(start, end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(start, end); } void @@ -612,7 +604,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long BUG_ON(!vma->vm_mm->context); if (pfn_valid(pfn)) { - flush_tlb_page(vma, vmaddr); + if (parisc_requires_coherency()) + flush_tlb_page(vma, vmaddr); __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } } diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 98190252c12fdc801ca8f7f328733727577f48f2..f622a311d04a1a99676f5b90469d18940a9d8294 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1481,12 +1481,44 @@ int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, unsigned long offset) { int retval; - unsigned long flags; + unsigned long flags, entries; spin_lock_irqsave(&pdc_lock, flags); retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ, - __pa(&pret), __pa(pdt_entries_ptr), + __pa(&pdc_result), __pa(pdt_entries_ptr), count, offset); + + if (retval == PDC_OK) { + entries = min(pdc_result[0], count); + pret->actual_count_bytes = entries; + pret->pdt_entries = entries / sizeof(unsigned long); + } + + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + +/** + * pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware + * @pret: ptr to hold returned information + * @phys_addr: physical address to examine + * + */ +int pdc_pat_mem_get_dimm_phys_location( + struct pdc_pat_mem_phys_mem_location *pret, + unsigned long phys_addr) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS, + __pa(&pdc_result), phys_addr); + + if (retval == PDC_OK) + memcpy(pret, &pdc_result, sizeof(*pret)); + spin_unlock_irqrestore(&pdc_lock, flags); return retval; diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index ba5e1c7b1f177d45f743392c0950017622b143a8..0ca254085a6626374ed488401618ca3af19872c7 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr) /* * IRQ STACK - used for irq handler */ -#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ +#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */ union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; @@ -413,6 +413,10 @@ static inline void stack_overflow_check(struct pt_regs *regs) if (regs->sr[7]) return; + /* exit if already in panic */ + if (sysctl_panic_on_stackoverflow < 0) + return; + /* calculate kernel stack usage */ stack_usage = sp - stack_start; #ifdef CONFIG_IRQSTACKS @@ -454,8 +458,10 @@ static inline void stack_overflow_check(struct pt_regs *regs) #ifdef CONFIG_IRQSTACKS panic_check: #endif - if (sysctl_panic_on_stackoverflow) + if (sysctl_panic_on_stackoverflow) { + 
sysctl_panic_on_stackoverflow = -1; /* disable further checks */ panic("low stack detected by irq handler - check messages\n"); + } #endif } diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index f3a797e670b09461b45e1e7163d036fd457beb18..d02874ecb94df27d3171ae221ba693f4f8ca0a9c 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -112,10 +112,12 @@ void __init pdc_pdt_init(void) #ifdef CONFIG_64BIT struct pdc_pat_mem_read_pd_retinfo pat_pret; + /* try old obsolete PAT firmware function first */ + pdt_type = PDT_PAT_OLD; ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry, MAX_PDT_ENTRIES); if (ret != PDC_OK) { - pdt_type = PDT_PAT_OLD; + pdt_type = PDT_PAT_NEW; ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry, MAX_PDT_TABLE_SIZE, 0); } @@ -131,11 +133,20 @@ void __init pdc_pdt_init(void) } for (i = 0; i < pdt_status.pdt_entries; i++) { - if (i < 20) - pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n", - i, - pdt_entry[i] & PAGE_MASK, - pdt_entry[i] & 1); + struct pdc_pat_mem_phys_mem_location loc; + + /* get DIMM slot number */ + loc.dimm_slot = 0xff; +#ifdef CONFIG_64BIT + pdc_pat_mem_get_dimm_phys_location(&loc, pdt_entry[i]); +#endif + + pr_warn("PDT: BAD PAGE #%d at 0x%08lx, " + "DIMM slot %02x (error_type = %lu)\n", + i, + pdt_entry[i] & PAGE_MASK, + loc.dimm_slot, + pdt_entry[i] & 1); /* mark memory page bad */ memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index b64d7d21646ed50c4a5c1f046b0f8ca758abfcc6..a45a67d526f8ca8001fd1d06625b3b233d5a3835 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -145,6 +146,7 @@ void machine_power_off(void) /* prevent soft lockup/stalled CPU messages for endless loop. 
*/ rcu_sysrq_start(); + lockup_detector_suspend(); for (;;); } diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 3d6ef1b29c6ad312e5114f86ad737d13bf606403..ffe2cbf52d1a25617955c54f73890fdb02e7e394 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -78,6 +78,8 @@ SECTIONS *(.text.sys_exit) *(.text.do_sigaltstack) *(.text.do_fork) + *(.text.div) + *($$*) /* millicode routines */ *(.text.*) *(.fixup) *(.lock.text) /* out-of-line lock text */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 36f858c37ca70b576e851a52ada48e2400de86a1..81b0031f909f6d65855ed3051505a4baa9469a64 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -199,7 +199,7 @@ config PPC select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 - select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 8d4ed73d549091368be87c5b4483f33d6b00640d..e2b3e7a00c9e3548e0b0086f878d4b707ba40dbb 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -59,6 +59,19 @@ machine-$(CONFIG_PPC64) += 64 machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le UTS_MACHINE := $(subst $(space),,$(machine-y)) +# XXX This needs to be before we override LD below +ifdef CONFIG_PPC32 +KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o +else +ifeq ($(call ld-ifversion, -ge, 225000000, y),y) +# Have the linker provide sfpr if possible. +# There is a corresponding test in arch/powerpc/lib/Makefile +KBUILD_LDFLAGS_MODULE += --save-restore-funcs +else +KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o +endif +endif + ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) override LD += -EL LDEMULATION := lppc @@ -190,18 +203,6 @@ else CHECKFLAGS += -D__LITTLE_ENDIAN__ endif -ifdef CONFIG_PPC32 -KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o -else -ifeq ($(call ld-ifversion, -ge, 225000000, y),y) -# Have the linker provide sfpr if possible. 
-# There is a corresponding test in arch/powerpc/lib/Makefile -KBUILD_LDFLAGS_MODULE += --save-restore-funcs -else -KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o -endif -endif - ifeq ($(CONFIG_476FPE_ERR46),y) KBUILD_LDFLAGS_MODULE += --ppc476-workaround \ -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index a7814a7b15233261b0d9ba9d848543948dcd34d6..6f952fe1f0842232fab09eba6d89953d09946111 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -25,12 +25,20 @@ compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -Os -msoft-float -pipe \ -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ - -isystem $(shell $(CROSS32CC) -print-file-name=include) \ -D$(compress-y) +BOOTCC := $(CC) ifdef CONFIG_PPC64_BOOT_WRAPPER BOOTCFLAGS += -m64 +else +BOOTCFLAGS += -m32 +ifdef CROSS32_COMPILE + BOOTCC := $(CROSS32_COMPILE)gcc +endif endif + +BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) + ifdef CONFIG_CPU_BIG_ENDIAN BOOTCFLAGS += -mbig-endian else @@ -183,10 +191,10 @@ clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \ empty.c zImage.coff.lds zImage.ps3.lds zImage.lds quiet_cmd_bootcc = BOOTCC $@ - cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $< + cmd_bootcc = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $< quiet_cmd_bootas = BOOTAS $@ - cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< + cmd_bootas = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< quiet_cmd_bootar = BOOTAR $@ cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 0695ce047d565199e4501333fa41ece48cdf9e45..34fc9bbfca9e68d6372e1d34b79ebf95d978e685 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 5175028c56ce74e3e50a2b30eabccf7b87ed8f0e..c5246d29f3859965316bd4d48e4e816283439bf0 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_DEBUG_MUTEXES=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 1a61aa20dfbac9d5072ae83ef90640b8be380bd3..fd5d98a0b95c7b1ae5fda56892c2ecd43ea29f3a 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 
77529a3e38114cd4e5d413d687184e037827ea3a..5b4023c616f7087d247d17509f170944b26f6a76 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -59,13 +59,14 @@ extern struct patb_entry *partition_tb; #define PRTS_MASK 0x1f /* process table size field */ #define PRTB_MASK 0x0ffffffffffff000UL -/* - * Limit process table to PAGE_SIZE table. This - * also limit the max pid we can support. - * MAX_USER_CONTEXT * 16 bytes of space. - */ -#define PRTB_SIZE_SHIFT (CONTEXT_BITS + 4) -#define PRTB_ENTRIES (1ul << CONTEXT_BITS) +/* Number of supported PID bits */ +extern unsigned int mmu_pid_bits; + +/* Base PID to allocate from */ +extern unsigned int mmu_base_pid; + +#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) +#define PRTB_ENTRIES (1ul << mmu_pid_bits) /* * Power9 currently only support 64K partition table size. diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index d1da415e283cd270d8d2d089508558d369c7cf98..818a58fc3f4f967d3bc4524c32e64ba4529c4cca 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -608,9 +608,17 @@ static inline pte_t pte_mkdevmap(pte_t pte) return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP); } +/* + * This is potentially called with a pmd as the argument, in which case it's not + * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set. + * That's because the bit we use for _PAGE_DEVMAP is not reserved for software + * use in page directory entries (ie. non-ptes). + */ static inline int pte_devmap(pte_t pte) { - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP)); + u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE); + + return (pte_raw(pte) & mask) == mask; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index da7e9432fa8fc512b8c00f0ecc57bd626cf56911..0c76675394c5930d5cecf2ac01a2718c8e9b6c1f 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -45,7 +45,7 @@ extern void set_context(unsigned long id, pgd_t *pgd); #ifdef CONFIG_PPC_BOOK3S_64 extern void radix__switch_mmu_context(struct mm_struct *prev, - struct mm_struct *next); + struct mm_struct *next); static inline void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) @@ -67,6 +67,12 @@ extern void __destroy_context(unsigned long context_id); extern void mmu_context_init(void); #endif +#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU) +extern void radix_kvm_prefetch_workaround(struct mm_struct *mm); +#else +static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { } +#endif + extern void switch_cop(struct mm_struct *next); extern int use_cop(unsigned long acop, struct mm_struct *mm); extern void drop_cop(unsigned long acop, struct mm_struct *mm); @@ -79,9 +85,13 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { + bool new_on_cpu = false; + /* Mark this context has been used on the new CPU */ - if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) + if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + new_on_cpu = true; + } /* 32-bit keeps track of the current PGDIR in the thread struct */ #ifdef CONFIG_PPC32 @@ -109,6 +119,10 @@ static inline void switch_mm_irqs_off(struct 
mm_struct *prev, if (cpu_has_feature(CPU_FTR_ALTIVEC)) asm volatile ("dssall"); #endif /* CONFIG_ALTIVEC */ + + if (new_on_cpu) + radix_kvm_prefetch_workaround(next); + /* * The actual HW switching method differs between the various * sub architectures. Out of line for now diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 49d8422767b4de686ec0ee64fbf69ac415f05003..e925c1c99c71cab982967e7f7df6325e3135506f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -223,17 +223,27 @@ system_call_exit: andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) bne- .Lsyscall_exit_work - /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */ - li r7,MSR_FP + andi. r0,r8,MSR_FP + beq 2f #ifdef CONFIG_ALTIVEC - oris r7,r7,MSR_VEC@h + andis. r0,r8,MSR_VEC@h + bne 3f #endif - and r0,r8,r7 - cmpd r0,r7 - bne .Lsyscall_restore_math -.Lsyscall_restore_math_cont: +2: addi r3,r1,STACK_FRAME_OVERHEAD +#ifdef CONFIG_PPC_BOOK3S + li r10,MSR_RI + mtmsrd r10,1 /* Restore RI */ +#endif + bl restore_math +#ifdef CONFIG_PPC_BOOK3S + li r11,0 + mtmsrd r11,1 +#endif + ld r8,_MSR(r1) + ld r3,RESULT(r1) + li r11,-MAX_ERRNO - cmpld r3,r11 +3: cmpld r3,r11 ld r5,_CCR(r1) bge- .Lsyscall_error .Lsyscall_error_cont: @@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) std r5,_CCR(r1) b .Lsyscall_error_cont -.Lsyscall_restore_math: - /* - * Some initial tests from restore_math to avoid the heavyweight - * C code entry and MSR manipulations. - */ - LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK) - and. r0,r0,r8 - bne 1f - - ld r7,PACACURRENT(r13) - lbz r0,THREAD+THREAD_LOAD_FP(r7) -#ifdef CONFIG_ALTIVEC - lbz r6,THREAD+THREAD_LOAD_VEC(r7) - add r0,r0,r6 -#endif - cmpdi r0,0 - beq .Lsyscall_restore_math_cont - -1: addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_PPC_BOOK3S - li r10,MSR_RI - mtmsrd r10,1 /* Restore RI */ -#endif - bl restore_math -#ifdef CONFIG_PPC_BOOK3S - li r11,0 - mtmsrd r11,1 -#endif - /* Restore volatiles, reload MSR from updated one */ - ld r8,_MSR(r1) - ld r3,RESULT(r1) - li r11,-MAX_ERRNO - b .Lsyscall_restore_math_cont - /* Traced system call support */ .Lsyscall_dotrace: bl save_nvgprs diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 9029afd1fa2ab2ce231045659abf11ec9eaa32f6..f14f3c04ec7e0c7d366f60d161ff0ced6f1497ad 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1325,10 +1325,18 @@ EXC_VIRT_NONE(0x5800, 0x100) std r10,PACA_EXGEN+EX_R13(r13); \ EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H) +/* + * Branch to soft_nmi_interrupt using the emergency stack. The emergency + * stack is one that is usable by maskable interrupts so long as MSR_EE + * remains off. It is used for recovery when something has corrupted the + * normal kernel stack, for example. The "soft NMI" must not use the process + * stack because we want irq disabled sections to avoid touching the stack + * at all (other than PMU interrupts), so use the emergency stack for this, + * and run it entirely with interrupts hard disabled. 
+ */ EXC_COMMON_BEGIN(soft_nmi_common) mr r10,r1 ld r1,PACAEMERGSP(r13) - ld r1,PACA_NMI_EMERG_SP(r13) subi r1,r1,INT_FRAME_SIZE EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900, system_reset, soft_nmi_interrupt, diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 516ebef905c063b6bdb696f5f6048da37f7822cd..e6252c5a57a4a06f10bd13710ceecdcf72c3d26e 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -460,11 +460,17 @@ pnv_restore_hyp_resource_arch300: /* * Workaround for POWER9, if we lost resources, the ERAT * might have been mixed up and needs flushing. We also need - * to reload MMCR0 (see comment above). + * to reload MMCR0 (see comment above). We also need to set + * then clear bit 60 in MMCRA to ensure the PMU starts running. */ blt cr3,1f PPC_INVALIDATE_ERAT ld r1,PACAR1(r13) + mfspr r4,SPRN_MMCRA + ori r4,r4,(1 << (63-60)) + mtspr SPRN_MMCRA,r4 + xori r4,r4,(1 << (63-60)) + mtspr SPRN_MMCRA,r4 ld r4,_MMCR0(r1) mtspr SPRN_MMCR0,r4 1: diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 0bcec745a6724771c076e34132fb40e158b318f1..f291f7826abc65fe27bd9502ff45d922fa3f9e4e 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void) /* Clear bit 0 which we wouldn't clear otherwise */ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + if (happened & PACA_IRQ_HARD_DIS) { + /* + * We may have missed a decrementer interrupt if hard disabled. + * Check the decrementer register in case we had a rollover + * while hard disabled. + */ + if (!(happened & PACA_IRQ_DEC)) { + if (decrementer_check_overflow()) { + local_paca->irq_happened |= PACA_IRQ_DEC; + happened |= PACA_IRQ_DEC; + } + } + } /* * Force the delivery of pending soft-disabled interrupts on PS3. @@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void) * in case we also had a rollover while hard disabled */ local_paca->irq_happened &= ~PACA_IRQ_DEC; - if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) + if (happened & PACA_IRQ_DEC) return 0x900; /* Finally check if an external interrupt happened */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 9f3e2c932dccc1c3a1158fc174a8cf57e63dd75d..1f0fd361e09b9415f81242d67d9b939f36fd63ba 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -362,7 +362,8 @@ void enable_kernel_vsx(void) cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); - if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) { + if (current->thread.regs && + (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) { check_if_tm_restore_required(current); /* * If a thread has already been reclaimed then the @@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk) { if (tsk->thread.regs) { preempt_disable(); - if (tsk->thread.regs->msr & MSR_VSX) { + if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) { BUG_ON(tsk != current); giveup_vsx(tsk); } @@ -511,10 +512,6 @@ void restore_math(struct pt_regs *regs) { unsigned long msr; - /* - * Syscall exit makes a similar initial check before branching - * to restore_math. Keep them in synch. 
- */ if (!msr_tm_active(regs->msr) && !current->thread.load_fp && !loadvec(current->thread)) return; diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 925a4ef9055932174b4dc5a8f0424b330149132b..660ed39e9c9a59fc3b7087362787502a41ba50fd 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk) * If task is not current, it will have been flushed already to * it's thread_struct during __switch_to(). * - * A reclaim flushes ALL the state. + * A reclaim flushes ALL the state or if not in TM save TM SPRs + * in the appropriate thread structures from live. */ - if (tsk == current && MSR_TM_SUSPENDED(mfmsr())) - tm_reclaim_current(TM_CAUSE_SIGNAL); + if (tsk != current) + return; + if (MSR_TM_SUSPENDED(mfmsr())) { + tm_reclaim_current(TM_CAUSE_SIGNAL); + } else { + tm_enable(); + tm_save_sprs(&(tsk->thread)); + } } #else static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 997c88d54acf292b3e80beef1791ee2194ca9071..8d3320562c70f3ef7308645fb7b805fc14794e42 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags) hard_irq_disable(); while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { raw_local_irq_restore(*flags); - cpu_relax(); + spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); raw_local_irq_save(*flags); hard_irq_disable(); } @@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags) static void nmi_ipi_lock(void) { while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) - cpu_relax(); + spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); } static void nmi_ipi_unlock(void) @@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) nmi_ipi_lock_start(&flags); while (nmi_ipi_busy_count) { nmi_ipi_unlock_end(&flags); - cpu_relax(); + spin_until_cond(nmi_ipi_busy_count == 0); nmi_ipi_lock_start(&flags); } @@ -1003,21 +1003,13 @@ static struct sched_domain_topology_level powerpc_topology[] = { { NULL, }, }; -static __init long smp_setup_cpu_workfn(void *data __always_unused) -{ - smp_ops->setup_cpu(boot_cpuid); - return 0; -} - void __init smp_cpus_done(unsigned int max_cpus) { /* - * We want the setup_cpu() here to be called on the boot CPU, but - * init might run on any CPU, so make sure it's invoked on the boot - * CPU. + * We are running pinned to the boot CPU, see rest_init(). */ if (smp_ops && smp_ops->setup_cpu) - work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL); + smp_ops->setup_cpu(boot_cpuid); if (smp_ops && smp_ops->bringup_done) smp_ops->bringup_done(); diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index b67f8b03a32d0f12ce29eeb4ac3be0a97384fe72..34721a257a770c450baac0288f8006c94ff1975b 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags) * This may be called from low level interrupt handlers at some * point in future. 
*/ - local_irq_save(*flags); - while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) - cpu_relax(); + raw_local_irq_save(*flags); + hard_irq_disable(); /* Make it soft-NMI safe */ + while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) { + raw_local_irq_restore(*flags); + spin_until_cond(!test_bit(0, &__wd_smp_lock)); + raw_local_irq_save(*flags); + hard_irq_disable(); + } } static inline void wd_smp_unlock(unsigned long *flags) { clear_bit_unlock(0, &__wd_smp_lock); - local_irq_restore(*flags); + raw_local_irq_restore(*flags); } static void wd_lockup_ipi(struct pt_regs *regs) @@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs) nmi_panic(regs, "Hard LOCKUP"); } -static void set_cpu_stuck(int cpu, u64 tb) +static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb) { - cpumask_set_cpu(cpu, &wd_smp_cpus_stuck); - cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask); + cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask); if (cpumask_empty(&wd_smp_cpus_pending)) { wd_smp_last_reset_tb = tb; cpumask_andnot(&wd_smp_cpus_pending, @@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb) &wd_smp_cpus_stuck); } } +static void set_cpu_stuck(int cpu, u64 tb) +{ + set_cpumask_stuck(cpumask_of(cpu), tb); +} static void watchdog_smp_panic(int cpu, u64 tb) { @@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb) } smp_flush_nmi_ipi(1000000); - /* Take the stuck CPU out of the watch group */ - for_each_cpu(c, &wd_smp_cpus_pending) - set_cpu_stuck(c, tb); + /* Take the stuck CPUs out of the watch group */ + set_cpumask_stuck(&wd_smp_cpus_pending, tb); -out: wd_smp_unlock(&flags); printk_safe_flush(); @@ -152,6 +159,11 @@ static void watchdog_smp_panic(int cpu, u64 tb) if (hardlockup_panic) nmi_panic(NULL, "Hard LOCKUP"); + + return; + +out: + wd_smp_unlock(&flags); } static void wd_smp_clear_cpu_pending(int cpu, u64 tb) @@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data) void arch_touch_nmi_watchdog(void) { + unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; int cpu = smp_processor_id(); - watchdog_timer_interrupt(cpu); + if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks) + watchdog_timer_interrupt(cpu); } EXPORT_SYMBOL(arch_touch_nmi_watchdog); @@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu) static int start_wd_on_cpu(unsigned int cpu) { + unsigned long flags; + if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { WARN_ON(1); return 0; @@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu) if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) return 0; + wd_smp_lock(&flags); cpumask_set_cpu(cpu, &wd_cpus_enabled); if (cpumask_weight(&wd_cpus_enabled) == 1) { cpumask_set_cpu(cpu, &wd_smp_cpus_pending); wd_smp_last_reset_tb = get_tb(); } - smp_wmb(); + wd_smp_unlock(&flags); + start_watchdog_timer_on(cpu); return 0; @@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu) static int stop_wd_on_cpu(unsigned int cpu) { + unsigned long flags; + if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) return 0; /* Can happen in CPU unplug case */ stop_watchdog_timer_on(cpu); + wd_smp_lock(&flags); cpumask_clear_cpu(cpu, &wd_cpus_enabled); + wd_smp_unlock(&flags); + wd_smp_clear_cpu_pending(cpu, get_tb()); return 0; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 8cb0190e2a737aab903d350bb6d79422a8c37ac5..b42812e014c04b2fc96cdf5d183c23df0ee88b53 100644 --- 
a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) goto out; } - if (kvm->arch.hpt.virt) + if (kvm->arch.hpt.virt) { kvmppc_free_hpt(&kvm->arch.hpt); + kvmppc_rmap_reset(kvm); + } err = kvmppc_allocate_hpt(&info, order); if (err < 0) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0b436df746fcb094d8bd8b3c928c0862d6a90df5..359c79cdf0cc821d87a4e4322177294648b42ee3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3211,6 +3211,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) run->fail_entry.hardware_entry_failure_reason = 0; return -EINVAL; } + /* Enable TM so we can read the TM SPRs */ + mtmsr(mfmsr() | MSR_TM); current->thread.tm_tfhar = mfspr(SPRN_TFHAR); current->thread.tm_tfiar = mfspr(SPRN_TFIAR); current->thread.tm_texasr = mfspr(SPRN_TEXASR); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index cb44065e29463f99d6c44bf129ca01e82695a8ff..c52184a8efdf025c1efffc6dd7310e9374dca630 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1443,12 +1443,14 @@ mc_cont: ori r6,r6,1 mtspr SPRN_CTRLT,r6 4: - /* Read the guest SLB and save it away */ + /* Check if we are running hash or radix and store it in cr2 */ ld r5, VCPU_KVM(r9) lbz r0, KVM_RADIX(r5) - cmpwi r0, 0 + cmpwi cr2,r0,0 + + /* Read the guest SLB and save it away */ li r5, 0 - bne 3f /* for radix, save 0 entries */ + bne cr2, 3f /* for radix, save 0 entries */ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ mtctr r0 li r6,0 @@ -1712,11 +1714,6 @@ BEGIN_FTR_SECTION_NESTED(96) END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 22: - /* Clear out SLB */ - li r5,0 - slbmte r5,r5 - slbia - ptesync /* Restore host values of some registers */ BEGIN_FTR_SECTION @@ -1737,10 +1734,56 @@ BEGIN_FTR_SECTION mtspr SPRN_PID, r7 mtspr SPRN_IAMR, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + +#ifdef CONFIG_PPC_RADIX_MMU + /* + * Are we running hash or radix ? + */ + beq cr2,3f + + /* Radix: Handle the case where the guest used an illegal PID */ + LOAD_REG_ADDR(r4, mmu_base_pid) + lwz r3, VCPU_GUEST_PID(r9) + lwz r5, 0(r4) + cmpw cr0,r3,r5 + blt 2f + + /* + * Illegal PID, the HW might have prefetched and cached in the TLB + * some translations for the LPID 0 / guest PID combination which + * Linux doesn't know about, so we need to flush that PID out of + * the TLB. First we need to set LPIDR to 0 so tlbiel applies to + * the right context. + */ + li r0,0 + mtspr SPRN_LPID,r0 + isync + + /* Then do a congruence class local flush */ + ld r6,VCPU_KVM(r9) + lwz r0,KVM_TLB_SETS(r6) + mtctr r0 + li r7,0x400 /* IS field = 0b01 */ + ptesync + sldi r0,r3,32 /* RS has PID */ +1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ + addi r7,r7,0x1000 + bdnz 1b + ptesync + +2: /* Flush the ERAT on radix P9 DD1 guest exit */ BEGIN_FTR_SECTION PPC_INVALIDATE_ERAT END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) + b 4f +#endif /* CONFIG_PPC_RADIX_MMU */ + /* Hash: clear out SLB */ +3: li r5,0 + slbmte r5,r5 + slbia + ptesync +4: /* * POWER7/POWER8 guest -> host partition switch code. 
* We don't have to lock against tlbies but we do diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index abed1fe6992fa1bb365dfd30717bcfac3f402ee4..a75f63833284cbb089f67fa21bee0357426a2a88 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -126,9 +126,10 @@ static int hash__init_new_context(struct mm_struct *mm) static int radix__init_new_context(struct mm_struct *mm) { unsigned long rts_field; - int index; + int index, max_id; - index = alloc_context_id(1, PRTB_ENTRIES - 1); + max_id = (1 << mmu_pid_bits) - 1; + index = alloc_context_id(mmu_base_pid, max_id); if (index < 0) return index; diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 5cc50d47ce3f99f4bf331a4b6a4c4be18c46379e..671a45d86c18dd7e174ffb909084cd0a37fcdaff 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -25,6 +25,9 @@ #include +unsigned int mmu_pid_bits; +unsigned int mmu_base_pid; + static int native_register_process_table(unsigned long base, unsigned long pg_sz, unsigned long table_size) { @@ -261,11 +264,34 @@ static void __init radix_init_pgtable(void) for_each_memblock(memory, reg) WARN_ON(create_physical_mapping(reg->base, reg->base + reg->size)); + + /* Find out how many PID bits are supported */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (!mmu_pid_bits) + mmu_pid_bits = 20; +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + /* + * When KVM is possible, we only use the top half of the + * PID space to avoid collisions between host and guest PIDs + * which can cause problems due to prefetch when exiting the + * guest with AIL=3 + */ + mmu_base_pid = 1 << (mmu_pid_bits - 1); +#else + mmu_base_pid = 1; +#endif + } else { + /* The guest uses the bottom half of the PID space */ + if (!mmu_pid_bits) + mmu_pid_bits = 19; + mmu_base_pid = 1; + } + /* * Allocate Partition table and process table for the * host. */ - BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large."); + BUG_ON(PRTB_SIZE_SHIFT > 36); process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); /* * Fill in the process table. 
@@ -339,6 +365,12 @@ static int __init radix_dt_scan_page_sizes(unsigned long node, if (type == NULL || strcmp(type, "cpu") != 0) return 0; + /* Find MMU PID size */ + prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size); + if (prop && size == 4) + mmu_pid_bits = be32_to_cpup(prop); + + /* Grab page size encodings */ prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size); if (!prop) return 0; diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index e94fbd4c88458aa5953a271b1ef1eac0ef5c5c66..781532d7bc4d29683ab2ac72cff6f0ad7d4d01f4 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -36,7 +36,7 @@ void subpage_prot_free(struct mm_struct *mm) } } addr = 0; - for (i = 0; i < 2; ++i) { + for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) { p = spt->protptrs[i]; if (!p) continue; diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 744e0164ecf58551f036ef30e19a79638adcaa76..16ae1bbe13f09e588919f8aa55a81ed575290360 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -12,12 +12,12 @@ #include #include #include -#include +#include #include #include #include - +#include #define RIC_FLUSH_TLB 0 #define RIC_FLUSH_PWC 1 @@ -454,3 +454,44 @@ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, else radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize); } + +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +extern void radix_kvm_prefetch_workaround(struct mm_struct *mm) +{ + unsigned int pid = mm->context.id; + + if (unlikely(pid == MMU_NO_CONTEXT)) + return; + + /* + * If this context hasn't run on that CPU before and KVM is + * around, there's a slim chance that the guest on another + * CPU just brought in obsolete translation into the TLB of + * this CPU due to a bad prefetch using the guest PID on + * the way into the hypervisor. + * + * We work around this here. If KVM is possible, we check if + * any sibling thread is in KVM. If it is, the window may exist + * and thus we flush that PID from the core. + * + * A potential future improvement would be to mark which PIDs + * have never been used on the system and avoid it if the PID + * is new and the process has no other cpumask bit set. 
+ */ + if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) { + int cpu = smp_processor_id(); + int sib = cpu_first_thread_sibling(cpu); + bool flush = false; + + for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) { + if (sib == cpu) + continue; + if (paca[sib].kvm_hstate.kvm_vcpu) + flush = true; + } + if (flush) + _tlbiel_pid(pid, RIC_FLUSH_ALL); + } +} +EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround); +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index d7c9b186954d931c0957908dbd7a19e3f19d0610..763ffca9628d2e983299a2d44cb007cbbefd85c9 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -89,7 +89,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto err; ret = of_irq_to_resource(np, 0, &res[1]); - if (!ret) + if (ret <= 0) goto err; pdev = platform_device_alloc("mpc83xx_spi", i); diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 2abee070373fb3a8b757b8d3cb269e5d0b89dff6..a553aeea7af683812ba2f5a80d65e97cda163919 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE; */ static u64 pnv_deepest_stop_psscr_val; static u64 pnv_deepest_stop_psscr_mask; +static u64 pnv_deepest_stop_flag; static bool deepest_stop_found; static int pnv_save_sprs_for_deep_states(void) @@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void) update_subcore_sibling_mask(); - if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) - pnv_save_sprs_for_deep_states(); + if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) { + int rc = pnv_save_sprs_for_deep_states(); + + if (likely(!rc)) + return; + + /* + * The stop-api is unable to restore hypervisor + * resources on wakeup from platform idle states which + * lose full context. So disable such states. + */ + supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT; + pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n"); + pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n"); + + if (cpu_has_feature(CPU_FTR_ARCH_300) && + (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) { + /* + * Use the default stop state for CPU-Hotplug + * if available. 
+ */ + if (default_stop_found) { + pnv_deepest_stop_psscr_val = + pnv_default_stop_val; + pnv_deepest_stop_psscr_mask = + pnv_default_stop_mask; + pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n", + pnv_deepest_stop_psscr_val); + } else { /* Fallback to snooze loop for CPU-Hotplug */ + deepest_stop_found = false; + pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n"); + } + } + } } u32 pnv_get_supported_cpuidle_states(void) @@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu) pnv_deepest_stop_psscr_val; srr1 = power9_idle_stop(psscr); - } else if (idle_states & OPAL_PM_WINKLE_ENABLED) { + } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) && + (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) { srr1 = power7_idle_insn(PNV_THREAD_WINKLE); } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { @@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags, max_residency_ns = residency_ns[i]; pnv_deepest_stop_psscr_val = psscr_val[i]; pnv_deepest_stop_psscr_mask = psscr_mask[i]; + pnv_deepest_stop_flag = flags[i]; deepest_stop_found = true; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 437613588df15ceab2a1ce5e88649b675c495de4..b900eb1d5e174c1cdafc0a7e9620caecac98bb69 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1852,6 +1852,14 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) /* 4GB offset bypasses 32-bit space */ set_dma_offset(&pdev->dev, (1ULL << 32)); set_dma_ops(&pdev->dev, &dma_direct_ops); + } else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) { + /* + * Fail the request if a DMA mask between 32 and 64 bits + * was requested but couldn't be fulfilled. Ideally we + * would do this for 64-bits but historically we have + * always fallen back to 32-bits. 
+ */ + return -ENOMEM; } else { dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); set_dma_ops(&pdev->dev, &dma_iommu_ops); diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index e5bf1e84047f4c3fb9746ae85b0c6593f47ab012..011ef2180fe6b8d85f4fa11a1b31e8636e29d617 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np) of_detach_node(np); of_node_put(parent); - of_node_put(np); /* Must decrement the refcount */ return 0; } diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 7317b3108a88859a91523c45f1e52c08cb22fdc4..2eb8ff0d6fca443543c32ac80ff690b4b67be1ef 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -47,10 +47,9 @@ struct mmu_table_batch { extern void tlb_table_flush(struct mmu_gather *tlb); extern void tlb_remove_table(struct mmu_gather *tlb, void *table); -static inline void tlb_gather_mmu(struct mmu_gather *tlb, - struct mm_struct *mm, - unsigned long start, - unsigned long end) +static inline void +arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end) { tlb->mm = mm; tlb->start = start; @@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) tlb_flush_mmu_free(tlb); } -static inline void tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end) +static inline void +arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force) { + if (force) { + tlb->start = start; + tlb->end = end; + } + tlb_flush_mmu(tlb); } diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 0c82f7903fc7a7c0bfc9c7d5b02ab79ab4a2be89..c1bf75ffb8756549b1c6a5c97908ff49dd2e79aa 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -998,7 +998,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) psw_bits(regs.psw).ia = sfr->basic.ia; psw_bits(regs.psw).dat = sfr->basic.T; psw_bits(regs.psw).wait = sfr->basic.W; - psw_bits(regs.psw).per = sfr->basic.P; + psw_bits(regs.psw).pstate = sfr->basic.P; psw_bits(regs.psw).as = sfr->basic.AS; /* diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3f2884e99ed4ce461cdb6f08148968880e90747b..af09d3437631d348dca2f1a6699c34ed49c624ed 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1324,7 +1324,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; - int i, r = 0; + int srcu_idx, i, r = 0; if (args->flags != 0) return -EINVAL; @@ -1342,6 +1342,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) return -ENOMEM; down_read(&current->mm->mmap_sem); + srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); if (kvm_is_error_hva(hva)) { @@ -1353,6 +1354,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (r) break; } + srcu_read_unlock(&kvm->srcu, srcu_idx); up_read(&current->mm->mmap_sem); if (!r) { @@ -1370,7 +1372,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; - int i, r = 0; + int srcu_idx, i, r = 0; if (args->flags != 0) return -EINVAL; @@ -1396,6 +1398,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) goto out;
down_read(&current->mm->mmap_sem); + srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); if (kvm_is_error_hva(hva)) { @@ -1413,6 +1416,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (r) break; } + srcu_read_unlock(&kvm->srcu, srcu_idx); up_read(&current->mm->mmap_sem); out: kvfree(keys); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index d4d409ba206b2e0f4ed0b88cc4f3a3ba125b3597..4a1f7366b17aeffacb6c766ab891227e2609f1b9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -591,11 +591,11 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) unsigned long ptev; pgste_t pgste; - /* Clear storage key */ + /* Clear storage key ACC and F, but set R/C */ preempt_disable(); pgste = pgste_get_lock(ptep); - pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | - PGSTE_GR_BIT | PGSTE_GC_BIT); + pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); + pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT; ptev = pte_val(*ptep); if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 01c6fbc3e85b62fdec83bacea0f76a88126bfe84..1803797fc885cf799337b5d61f30ae726628b8d6 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) insn_count = bpf_jit_insn(jit, fp, i); if (insn_count < 0) return -1; - jit->addrs[i + 1] = jit->prg; /* Next instruction address */ + /* Next instruction address */ + jit->addrs[i + insn_count] = jit->prg; } bpf_jit_epilogue(jit); diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 46e0d635e36f711aff9a88c45955905d7fbf3cc2..51a8bc967e75f1e3c96a70783e9da439310edbcb 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end) { tlb->mm = mm; tlb->start = start; @@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start } static inline void -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force) { - if (tlb->fullmm) + if (tlb->fullmm || force) flush_tlb_mm(tlb->mm); /* keep the page table cache within bounds */ diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index c74d3701ad6830fe88338c185be374f7a6730f07..207a43a2d8b337ee61d8d982341946018ded5c97 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig @@ -1,4 +1,3 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 @@ -23,7 +22,6 @@ CONFIG_IP_PNP_DHCP=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y -# CONFIG_INET_LRO is not set CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m @@ -69,7 +67,6 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_ISO9660_FS=m CONFIG_PROC_KCORE=y @@ -82,7 +79,6 @@ CONFIG_NLS=y CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_KGDB=y CONFIG_KGDB_TESTS=y CONFIG_CRYPTO_NULL=m diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index b2e650d1764f63d430d6b96eac793621d26902da..ca8609d7292ffee5ffbf068eb2b11653bf59ff7c 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -1,5 +1,4 @@ CONFIG_64BIT=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -184,7 +183,6 @@ CONFIG_HID_TOPSEED=y CONFIG_HID_THRUSTMASTER=y CONFIG_HID_ZEROPLUS=y CONFIG_USB=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_TT_NEWSCHED is not set CONFIG_USB_OHCI_HCD=y @@ -210,8 +208,6 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENTS=y CONFIG_KEYS=y diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 2cddcda4f85f7555dced053b1fc82fd991e19943..87841d687f8d5306663c54d050aa0ca4b2b08cd7 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm); void __tsb_context_switch(unsigned long pgd_pa, struct tsb_config *tsb_base, struct tsb_config *tsb_huge, - unsigned long tsb_descr_pa); + unsigned long tsb_descr_pa, + unsigned long secondary_ctx); -static inline void tsb_context_switch(struct mm_struct *mm) +static inline void tsb_context_switch_ctx(struct mm_struct *mm, + unsigned long ctx) { __tsb_context_switch(__pa(mm->pgd), &mm->context.tsb_block[MM_TSB_BASE], @@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm) #else NULL #endif - , __pa(&mm->context.tsb_descr[MM_TSB_BASE])); + , __pa(&mm->context.tsb_descr[MM_TSB_BASE]), + ctx); } +#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0) + void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss); @@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str * cpu0 to update it's TSB because at that point the cpu_vm_mask * only had cpu1 set in it. 
*/ - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); /* Any time a processor runs a context on an address space * for the first time, we must flush that context out of the diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index 1d8321c827a8821bb4e9f4989eb883cd761370db..1b1286d0506910c0f9a92ab6af14e272dd008d61 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h @@ -47,10 +47,26 @@ #define SUN4V_CHIP_NIAGARA5 0x05 #define SUN4V_CHIP_SPARC_M6 0x06 #define SUN4V_CHIP_SPARC_M7 0x07 +#define SUN4V_CHIP_SPARC_M8 0x08 #define SUN4V_CHIP_SPARC64X 0x8a #define SUN4V_CHIP_SPARC_SN 0x8b #define SUN4V_CHIP_UNKNOWN 0xff +/* + * The following CPU_ID_xxx constants are used + * to identify the CPU type in the setup phase + * (see head_64.S) + */ +#define CPU_ID_NIAGARA1 ('1') +#define CPU_ID_NIAGARA2 ('2') +#define CPU_ID_NIAGARA3 ('3') +#define CPU_ID_NIAGARA4 ('4') +#define CPU_ID_NIAGARA5 ('5') +#define CPU_ID_M6 ('6') +#define CPU_ID_M7 ('7') +#define CPU_ID_M8 ('8') +#define CPU_ID_SONOMA1 ('N') + #ifndef __ASSEMBLY__ enum ultra_tlb_layout { diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 493e023a468a919c61d77451e43e0a4a2e414bbe..ef4f18f7a67402ed8baceb2ea05ee7f6368cc404 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "sparc-m7"; break; + case SUN4V_CHIP_SPARC_M8: + sparc_cpu_type = "SPARC-M8"; + sparc_fpu_type = "SPARC-M8 integrated FPU"; + sparc_pmu_type = "sparc-m8"; + break; + case SUN4V_CHIP_SPARC_SN: sparc_cpu_type = "SPARC-SN"; sparc_fpu_type = "SPARC-SN integrated FPU"; diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index 45c820e1cba5d949ff936f15392ca3c0c8578a34..90d550bbfeefe484f1560940f111235f26332d7a 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c @@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) case SUN4V_CHIP_NIAGARA5: case SUN4V_CHIP_SPARC_M6: case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: case SUN4V_CHIP_SPARC_SN: case SUN4V_CHIP_SPARC64X: rover_inc_table = niagara_iterate_method; diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 41a4073286671eff51f275bfca4ae6d9d01db74d..78e0211753d28f14f955af865704248b1e5daf24 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type) nop 70: ldub [%g1 + 7], %g2 - cmp %g2, '3' + cmp %g2, CPU_ID_NIAGARA3 be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA3, %g4 - cmp %g2, '4' + cmp %g2, CPU_ID_NIAGARA4 be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA4, %g4 - cmp %g2, '5' + cmp %g2, CPU_ID_NIAGARA5 be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA5, %g4 - cmp %g2, '6' + cmp %g2, CPU_ID_M6 be,pt %xcc, 5f mov SUN4V_CHIP_SPARC_M6, %g4 - cmp %g2, '7' + cmp %g2, CPU_ID_M7 be,pt %xcc, 5f mov SUN4V_CHIP_SPARC_M7, %g4 - cmp %g2, 'N' + cmp %g2, CPU_ID_M8 + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M8, %g4 + cmp %g2, CPU_ID_SONOMA1 be,pt %xcc, 5f mov SUN4V_CHIP_SPARC_SN, %g4 ba,pt %xcc, 49f @@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type) 91: sethi %hi(prom_cpu_compatible), %g1 or %g1, %lo(prom_cpu_compatible), %g1 ldub [%g1 + 17], %g2 - cmp %g2, '1' + cmp %g2, CPU_ID_NIAGARA1 be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA1, %g4 - cmp %g2, '2' + cmp %g2, CPU_ID_NIAGARA2 be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA2, %g4 @@ -600,6 +603,9 @@ niagara_tlb_fixup: be,pt %xcc, niagara4_patch 
nop cmp %g1, SUN4V_CHIP_SPARC_M7 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_M8 be,pt %xcc, niagara4_patch nop cmp %g1, SUN4V_CHIP_SPARC_SN diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 4d9c3e13c15056b5d60e7ccd266b36cfe29d2c00..150ee7d4b059a69e174dff7c7d16ff906f73e1ed 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -288,10 +288,17 @@ static void __init sun4v_patch(void) sun4v_patch_2insn_range(&__sun4v_2insn_patch, &__sun4v_2insn_patch_end); - if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || - sun4v_chip_type == SUN4V_CHIP_SPARC_SN) + + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: + case SUN4V_CHIP_SPARC_SN: sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, &__sun_m7_2insn_patch_end); + break; + default: + break; + } sun4v_hvapi_init(); } @@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void) sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || sun4v_chip_type == SUN4V_CHIP_SPARC_SN || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= HWCAP_SPARC_BLKINIT; @@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void) sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || sun4v_chip_type == SUN4V_CHIP_SPARC_SN || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= HWCAP_SPARC_N2; @@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void) sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || sun4v_chip_type == SUN4V_CHIP_SPARC_SN || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | @@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void) sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || sun4v_chip_type == SUN4V_CHIP_SPARC_SN || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index 07c0df92496034efd1262dd2b40e56ffd5486c0c..db872dbfafe943fd92ca10b9995b003ed6a1aee7 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -360,6 +360,7 @@ tsb_flush: * %o1: TSB base config pointer * %o2: TSB huge config pointer, or NULL if none * %o3: Hypervisor TSB descriptor physical address + * %o4: Secondary context to load, if non-zero * * We have to run this whole thing with interrupts * disabled so that the current cpu doesn't change @@ -372,6 +373,17 @@ __tsb_context_switch: rdpr %pstate, %g1 wrpr %g1, PSTATE_IE, %pstate + brz,pn %o4, 1f + mov SECONDARY_CONTEXT, %o5 + +661: stxa %o4, [%o5] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %o4, [%o5] ASI_MMU + .previous + flush %g6 + +1: TRAP_LOAD_TRAP_BLOCK(%g2, %g3) stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 54f98706b03b2f53025adb99e086002a0629e9f0..5a8cb37f0a3b8e31d0a4194d831bb39559bb6a51 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08) ENTRY(U3_retl_o2_and_7_plus_GS) and %o2, 7, %o2 retl - add %o2, GLOBAL_SPARE, %o2 + add %o2, GLOBAL_SPARE, %o0 
ENDPROC(U3_retl_o2_and_7_plus_GS) ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) add GLOBAL_SPARE, 8, GLOBAL_SPARE and %o2, 7, %o2 retl - add %o2, GLOBAL_SPARE, %o2 + add %o2, GLOBAL_SPARE, %o0 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) #endif diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3c40ebd50f928cbbbfe69c65c35810a78b30c53d..afa0099f374852e0cf093088d942512008a45a68 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde } #ifdef CONFIG_HUGETLB_PAGE +static void __init add_huge_page_size(unsigned long size) +{ + unsigned int order; + + if (size_to_hstate(size)) + return; + + order = ilog2(size) - PAGE_SHIFT; + hugetlb_add_hstate(order); +} + +static int __init hugetlbpage_init(void) +{ + add_huge_page_size(1UL << HPAGE_64K_SHIFT); + add_huge_page_size(1UL << HPAGE_SHIFT); + add_huge_page_size(1UL << HPAGE_256MB_SHIFT); + add_huge_page_size(1UL << HPAGE_2GB_SHIFT); + + return 0; +} + +arch_initcall(hugetlbpage_init); + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string) goto out; } - hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT); + add_huge_page_size(hugepage_size); rc = 1; out: @@ -1921,12 +1944,22 @@ static void __init setup_page_offset(void) break; case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_SN: - default: /* M7 and later support 52-bit virtual addresses. */ sparc64_va_hole_top = 0xfff8000000000000UL; sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 49; break; + case SUN4V_CHIP_SPARC_M8: + default: + /* M8 and later support 54-bit virtual addresses. + * However, restricting M8 and above VA bits to 53 + * as 4-level page table cannot support more than + * 53 VA bits. 
+ */ + sparc64_va_hole_top = 0xfff0000000000000UL; + sparc64_va_hole_bottom = 0x0010000000000000UL; + max_phys_bits = 51; + break; } } @@ -2138,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) */ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: case SUN4V_CHIP_SPARC_SN: pagecv_flag = 0x00; break; @@ -2290,6 +2324,7 @@ void __init paging_init(void) */ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: + case SUN4V_CHIP_SPARC_M8: case SUN4V_CHIP_SPARC_SN: page_cache4v_flag = _PAGE_CP_4V; break; diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c index 17bd2e167e07edd934dfe9957c43712b21401c55..df707a8ad3117074805e138a3992a7e080d43e5a 100644 --- a/arch/sparc/power/hibernate.c +++ b/arch/sparc/power/hibernate.c @@ -35,6 +35,5 @@ void restore_processor_state(void) { struct mm_struct *mm = current->active_mm; - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); } diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 600a2e9bfee2feea2a6dbc8b91d2a5a872d9d8d3..344d95619d0334659e6f4a9f3a5bff70ae95f67c 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end) { tlb->mm = mm; tlb->start = start; @@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb) tlb_flush_mmu_free(tlb); } -/* tlb_finish_mmu +/* arch_tlb_finish_mmu * Called at the end of the shootdown operation to free up any resources * that were required. 
*/ static inline void -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force) { + if (force) { + tlb->start = start; + tlb->end = end; + tlb->need_flush = 1; + } tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5f3b756ec0d3e58728bada989a1ce454f88d6873..87602cef7aba07ce1b459356c3a554102417a949 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -99,6 +99,7 @@ config X86 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL + select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI select HAVE_ACPI_APEI_NMI if ACPI select HAVE_ALIGNED_STRUCT_PAGE if SLUB @@ -162,7 +163,7 @@ config X86 select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI - select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 2c860ad4fe0686a9cf796cd9a5b4453b8199223f..8a958274b54cc4bb9d17888f80aa7737fad4f618 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -34,6 +34,7 @@ KBUILD_CFLAGS += $(cflags-y) KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index 630e3664906bfc399c885ff7c3525fc27bfb7218..16f49123d747b7a1c224b75c60d8a3af1e8e701c 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -16,6 +16,15 @@ #include "ctype.h" #include "string.h" +/* + * Undef these macros so that the functions that we provide + * here will have the correct names regardless of how string.h + * may have chosen to #define them. 
+ */ +#undef memcpy +#undef memset +#undef memcmp + int memcmp(const void *s1, const void *s2, size_t len) { bool diff; diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1cd792db15efe760e3a6fc8b17b9a4c4e6f35233..1eab79c9ac484172a63d9cc0c4a409b7fffe7e8b 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -117,11 +117,10 @@ .set T1, REG_T1 .endm -#define K_BASE %r8 #define HASH_PTR %r9 +#define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 -#define BUFFER_END %r11 #define PRECALC_BUF %r14 #define WK_BUF %r15 @@ -205,14 +204,14 @@ * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ - vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP + vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) - vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\ + vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) @@ -255,7 +254,7 @@ vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -291,7 +290,7 @@ vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -446,6 +445,16 @@ .endm +/* Add constant only if (%2 > %3) condition met (uses RTA as temp) + * %1 + %2 >= %3 ? %4 : 0 + */ +.macro ADD_IF_GE a, b, c, d + mov \a, RTA + add $\d, RTA + cmp $\c, \b + cmovge RTA, \a +.endm + /* * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining */ @@ -463,13 +472,16 @@ lea (2*4*80+32)(%rsp), WK_BUF # Precalc WK for first 2 blocks - PRECALC_OFFSET = 0 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr - PRECALC_OFFSET = 128 + + /* Go to next block if needed */ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .align 32 @@ -479,8 +491,8 @@ _loop: * we use K_BASE value as a signal of a last block, * it is set below by: cmovae BUFFER_PTR, K_BASE */ - cmp K_BASE, BUFFER_PTR - jne _begin + test BLOCKS_CTR, BLOCKS_CTR + jnz _begin .align 32 jmp _end .align 32 @@ -512,10 +524,10 @@ _loop0: .set j, j+2 .endr - add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */ - cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ - + /* Update Counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 /* * rounds * 60,62,64,66,68 @@ -532,8 +544,8 @@ _loop0: UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E - cmp K_BASE, BUFFER_PTR /* is current block the last one? 
*/ - je _loop + test BLOCKS_CTR, BLOCKS_CTR + jz _loop mov TB, B @@ -575,10 +587,10 @@ _loop2: .set j, j+2 .endr - add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */ - - cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ + /* update counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp _loop3 _loop3: @@ -641,19 +653,12 @@ _loop3: avx2_zeroupper - lea K_XMM_AR(%rip), K_BASE - + /* Setup initial values */ mov CTX, HASH_PTR mov BUF, BUFFER_PTR - lea 64(BUF), BUFFER_PTR2 - - shl $6, CNT /* mul by 64 */ - add BUF, CNT - add $64, CNT - mov CNT, BUFFER_END - cmp BUFFER_END, BUFFER_PTR2 - cmovae K_BASE, BUFFER_PTR2 + mov BUF, BUFFER_PTR2 + mov CNT, BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index f960a043cdeba4a36f4ba20fddf81369f8ced38f..fc61739150e7c2c5a484c020b14441551fe5403a 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, static bool avx2_usable(void) { - if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) + if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI1) && boot_cpu_has(X86_FEATURE_BMI2)) return true; diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a9a8027a6c0eaa748541282448f41b9eec834d6c..6d078b89a5e887de4d9cbe1c42f3ed34d9682642 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -705,6 +705,7 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_HAVE_KVM apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi +apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi #endif #ifdef CONFIG_X86_MCE_THRESHOLD @@ -1210,6 +1211,8 @@ ENTRY(nmi) * other IST entries. */ + ASM_CLAC + /* Use %rdx as our temp variable throughout */ pushq %rdx diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 8e3db8f642a7a02dd8f99db3a25dedbfd1deb01a..af12e294caeda5f518cf8b80ce5765996f1d8c56 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored) load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm)); } -static void x86_pmu_event_mapped(struct perf_event *event) +static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) { if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; @@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event) * For now, this can't happen because all callers hold mmap_sem * for write. If this changes, we'll need a different solution. 
*/ - lockdep_assert_held_exclusive(&current->mm->mmap_sem); + lockdep_assert_held_exclusive(&mm->mmap_sem); - if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1) - on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); + if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) + on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1); } -static void x86_pmu_event_unmapped(struct perf_event *event) +static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm) { - if (!current->mm) - return; if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; - if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed)) - on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); + if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) + on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1); } static int x86_pmu_event_idx(struct perf_event *event) diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 8ae8c5ce3a1f94debb2fe81f5652cbd7b35f2c4a..ddd8d3516bfcfa58fc3baf7b3f4f70b7231190ab 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -69,7 +69,7 @@ struct bts_buffer { struct bts_phys buf[0]; }; -struct pmu bts_pmu; +static struct pmu bts_pmu; static size_t buf_size(struct page *page) { diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index eb0533558c2b705957a30704218b983eda65e61e..d32c0eed38ca92665d378fa2d0792eed12aad818 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -587,7 +587,7 @@ static __initconst const u64 p4_hw_cache_event_ids * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are * either up to date automatically or not applicable at all. */ -struct p4_event_alias { +static struct p4_event_alias { u64 original; u64 alternative; } p4_event_aliases[] = { diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index a45e2114a8460925b44bd6e0983f3ee37e83d861..8e2457cb6b4a416e1c84d9baa990ea512ec74ba0 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -559,7 +559,7 @@ static struct attribute_group rapl_pmu_format_group = { .attrs = rapl_formats_attr, }; -const struct attribute_group *rapl_attr_groups[] = { +static const struct attribute_group *rapl_attr_groups[] = { &rapl_pmu_attr_group, &rapl_pmu_format_group, &rapl_pmu_events_group, diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 44ec523287f670dff8eee3e3c9eb1bb1606489b6..1c5390f1cf0992787afa8d34bb361f622b00dcb2 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -721,7 +721,7 @@ static struct attribute *uncore_pmu_attrs[] = { NULL, }; -static struct attribute_group uncore_pmu_attr_group = { +static const struct attribute_group uncore_pmu_attr_group = { .attrs = uncore_pmu_attrs, }; diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index cda56933200503662d133050fc47bdae050e8d61..6a5cbe90f8593eb23a7a403132240fc9afc87c9d 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -272,7 +272,7 @@ static struct attribute *nhmex_uncore_ubox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_ubox_format_group = { +static const struct attribute_group nhmex_uncore_ubox_format_group = { .name = "format", .attrs = nhmex_uncore_ubox_formats_attr, }; @@ -299,7 +299,7 @@ static struct attribute *nhmex_uncore_cbox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_cbox_format_group
= { +static const struct attribute_group nhmex_uncore_cbox_format_group = { .name = "format", .attrs = nhmex_uncore_cbox_formats_attr, }; @@ -407,7 +407,7 @@ static struct attribute *nhmex_uncore_bbox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_bbox_format_group = { +static const struct attribute_group nhmex_uncore_bbox_format_group = { .name = "format", .attrs = nhmex_uncore_bbox_formats_attr, }; @@ -484,7 +484,7 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_sbox_format_group = { +static const struct attribute_group nhmex_uncore_sbox_format_group = { .name = "format", .attrs = nhmex_uncore_sbox_formats_attr, }; @@ -898,7 +898,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_mbox_format_group = { +static const struct attribute_group nhmex_uncore_mbox_format_group = { .name = "format", .attrs = nhmex_uncore_mbox_formats_attr, }; @@ -1163,7 +1163,7 @@ static struct attribute *nhmex_uncore_rbox_formats_attr[] = { NULL, }; -static struct attribute_group nhmex_uncore_rbox_format_group = { +static const struct attribute_group nhmex_uncore_rbox_format_group = { .name = "format", .attrs = nhmex_uncore_rbox_formats_attr, }; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index a3dcc12bef4ab3a67aab29c6acf3480920e06952..db1127ce685eb8ea687bb4e0279f13ad5706a59f 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -130,7 +130,7 @@ static struct attribute *snb_uncore_formats_attr[] = { NULL, }; -static struct attribute_group snb_uncore_format_group = { +static const struct attribute_group snb_uncore_format_group = { .name = "format", .attrs = snb_uncore_formats_attr, }; @@ -289,7 +289,7 @@ static struct attribute *snb_uncore_imc_formats_attr[] = { NULL, }; -static struct attribute_group snb_uncore_imc_format_group = { +static const struct attribute_group snb_uncore_imc_format_group = { .name = "format", .attrs = snb_uncore_imc_formats_attr, }; @@ -769,7 +769,7 @@ static struct attribute *nhm_uncore_formats_attr[] = { NULL, }; -static struct attribute_group nhm_uncore_format_group = { +static const struct attribute_group nhm_uncore_format_group = { .name = "format", .attrs = nhm_uncore_formats_attr, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index dae2fedc16015e691ad6ae85ce4bc4122011eefc..db1fe377e6dd9ddfcfbc006a7346879f263d408a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -316,7 +316,7 @@ #define SKX_UPI_PCI_PMON_CTL0 0x350 #define SKX_UPI_PCI_PMON_CTR0 0x318 #define SKX_UPI_PCI_PMON_BOX_CTL 0x378 -#define SKX_PMON_CTL_UMASK_EXT 0xff +#define SKX_UPI_CTL_UMASK_EXT 0xffefff /* SKX M2M */ #define SKX_M2M_PCI_PMON_CTL0 0x228 @@ -328,7 +328,7 @@ DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); -DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55"); DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); @@ -351,7 +351,6 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5"); 
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12"); -DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12"); DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); @@ -603,27 +602,27 @@ static struct uncore_event_desc snbep_uncore_qpi_events[] = { { /* end: all zeroes */ }, }; -static struct attribute_group snbep_uncore_format_group = { +static const struct attribute_group snbep_uncore_format_group = { .name = "format", .attrs = snbep_uncore_formats_attr, }; -static struct attribute_group snbep_uncore_ubox_format_group = { +static const struct attribute_group snbep_uncore_ubox_format_group = { .name = "format", .attrs = snbep_uncore_ubox_formats_attr, }; -static struct attribute_group snbep_uncore_cbox_format_group = { +static const struct attribute_group snbep_uncore_cbox_format_group = { .name = "format", .attrs = snbep_uncore_cbox_formats_attr, }; -static struct attribute_group snbep_uncore_pcu_format_group = { +static const struct attribute_group snbep_uncore_pcu_format_group = { .name = "format", .attrs = snbep_uncore_pcu_formats_attr, }; -static struct attribute_group snbep_uncore_qpi_format_group = { +static const struct attribute_group snbep_uncore_qpi_format_group = { .name = "format", .attrs = snbep_uncore_qpi_formats_attr, }; @@ -1432,27 +1431,27 @@ static struct attribute *ivbep_uncore_qpi_formats_attr[] = { NULL, }; -static struct attribute_group ivbep_uncore_format_group = { +static const struct attribute_group ivbep_uncore_format_group = { .name = "format", .attrs = ivbep_uncore_formats_attr, }; -static struct attribute_group ivbep_uncore_ubox_format_group = { +static const struct attribute_group ivbep_uncore_ubox_format_group = { .name = "format", .attrs = ivbep_uncore_ubox_formats_attr, }; -static struct attribute_group ivbep_uncore_cbox_format_group = { +static const struct attribute_group ivbep_uncore_cbox_format_group = { .name = "format", .attrs = ivbep_uncore_cbox_formats_attr, }; -static struct attribute_group ivbep_uncore_pcu_format_group = { +static const struct attribute_group ivbep_uncore_pcu_format_group = { .name = "format", .attrs = ivbep_uncore_pcu_formats_attr, }; -static struct attribute_group ivbep_uncore_qpi_format_group = { +static const struct attribute_group ivbep_uncore_qpi_format_group = { .name = "format", .attrs = ivbep_uncore_qpi_formats_attr, }; @@ -1888,7 +1887,7 @@ static struct attribute *knl_uncore_ubox_formats_attr[] = { NULL, }; -static struct attribute_group knl_uncore_ubox_format_group = { +static const struct attribute_group knl_uncore_ubox_format_group = { .name = "format", .attrs = knl_uncore_ubox_formats_attr, }; @@ -1928,7 +1927,7 @@ static struct attribute *knl_uncore_cha_formats_attr[] = { NULL, }; -static struct attribute_group knl_uncore_cha_format_group = { +static const struct attribute_group knl_uncore_cha_format_group = { .name = "format", .attrs = knl_uncore_cha_formats_attr, }; @@ -2038,7 +2037,7 @@ static struct attribute *knl_uncore_pcu_formats_attr[] = { NULL, }; -static struct attribute_group knl_uncore_pcu_format_group = { +static const struct attribute_group knl_uncore_pcu_format_group = { .name = "format", .attrs = knl_uncore_pcu_formats_attr, }; @@ -2188,7 +2187,7 @@ static struct attribute 
*knl_uncore_irp_formats_attr[] = { NULL, }; -static struct attribute_group knl_uncore_irp_format_group = { +static const struct attribute_group knl_uncore_irp_format_group = { .name = "format", .attrs = knl_uncore_irp_formats_attr, }; @@ -2386,7 +2385,7 @@ static struct attribute *hswep_uncore_ubox_formats_attr[] = { NULL, }; -static struct attribute_group hswep_uncore_ubox_format_group = { +static const struct attribute_group hswep_uncore_ubox_format_group = { .name = "format", .attrs = hswep_uncore_ubox_formats_attr, }; @@ -2440,7 +2439,7 @@ static struct attribute *hswep_uncore_cbox_formats_attr[] = { NULL, }; -static struct attribute_group hswep_uncore_cbox_format_group = { +static const struct attribute_group hswep_uncore_cbox_format_group = { .name = "format", .attrs = hswep_uncore_cbox_formats_attr, }; @@ -2622,7 +2621,7 @@ static struct attribute *hswep_uncore_sbox_formats_attr[] = { NULL, }; -static struct attribute_group hswep_uncore_sbox_format_group = { +static const struct attribute_group hswep_uncore_sbox_format_group = { .name = "format", .attrs = hswep_uncore_sbox_formats_attr, }; @@ -3302,7 +3301,6 @@ static struct attribute *skx_uncore_cha_formats_attr[] = { &format_attr_inv.attr, &format_attr_thresh8.attr, &format_attr_filter_tid4.attr, - &format_attr_filter_link4.attr, &format_attr_filter_state5.attr, &format_attr_filter_rem.attr, &format_attr_filter_loc.attr, @@ -3312,12 +3310,11 @@ static struct attribute *skx_uncore_cha_formats_attr[] = { &format_attr_filter_opc_0.attr, &format_attr_filter_opc_1.attr, &format_attr_filter_nc.attr, - &format_attr_filter_c6.attr, &format_attr_filter_isoc.attr, NULL, }; -static struct attribute_group skx_uncore_chabox_format_group = { +static const struct attribute_group skx_uncore_chabox_format_group = { .name = "format", .attrs = skx_uncore_cha_formats_attr, }; @@ -3333,8 +3330,11 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), - SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), + SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), + EVENT_EXTRA_END }; static u64 skx_cha_filter_mask(int fields) @@ -3347,6 +3347,17 @@ static u64 skx_cha_filter_mask(int fields) mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; if (fields & 0x4) mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; + if (fields & 0x8) { + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC; + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC; + } return mask; } @@ -3416,7 +3427,7 @@ static struct attribute *skx_uncore_iio_formats_attr[] = { NULL, }; -static struct attribute_group skx_uncore_iio_format_group = { +static const struct attribute_group skx_uncore_iio_format_group = { .name = "format", .attrs = skx_uncore_iio_formats_attr, }; @@ -3473,7 +3484,7 @@ static struct attribute *skx_uncore_formats_attr[] = { NULL, }; -static struct attribute_group skx_uncore_format_group = { +static const struct attribute_group skx_uncore_format_group = { .name = "format", .attrs = 
skx_uncore_formats_attr, }; @@ -3492,6 +3503,26 @@ static struct intel_uncore_type skx_uncore_irp = { .format_group = &skx_uncore_format_group, }; +static struct attribute *skx_uncore_pcu_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_occ_invert.attr, + &format_attr_occ_edge_det.attr, + &format_attr_filter_band0.attr, + &format_attr_filter_band1.attr, + &format_attr_filter_band2.attr, + &format_attr_filter_band3.attr, + NULL, +}; + +static struct attribute_group skx_uncore_pcu_format_group = { + .name = "format", + .attrs = skx_uncore_pcu_formats_attr, +}; + static struct intel_uncore_ops skx_uncore_pcu_ops = { IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), .hw_config = hswep_pcu_hw_config, @@ -3510,7 +3541,7 @@ static struct intel_uncore_type skx_uncore_pcu = { .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, .num_shared_regs = 1, .ops = &skx_uncore_pcu_ops, - .format_group = &snbep_uncore_pcu_format_group, + .format_group = &skx_uncore_pcu_format_group, }; static struct intel_uncore_type *skx_msr_uncores[] = { @@ -3574,7 +3605,7 @@ static struct attribute *skx_upi_uncore_formats_attr[] = { NULL, }; -static struct attribute_group skx_upi_uncore_format_group = { +static const struct attribute_group skx_upi_uncore_format_group = { .name = "format", .attrs = skx_upi_uncore_formats_attr, }; @@ -3603,8 +3634,8 @@ static struct intel_uncore_type skx_uncore_upi = { .perf_ctr_bits = 48, .perf_ctr = SKX_UPI_PCI_PMON_CTR0, .event_ctl = SKX_UPI_PCI_PMON_CTL0, - .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, - .event_mask_ext = SKX_PMON_CTL_UMASK_EXT, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SKX_UPI_CTL_UMASK_EXT, .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, .ops = &skx_upi_uncore_pci_ops, .format_group = &skx_upi_uncore_format_group, diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ca3c48c0872f4beba6b03ac92c50eb6a352b3006..5a28e8e55e36fd2164c0cda175288a7fcc534c4b 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -286,7 +286,7 @@ #define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */ +#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 1c18d83d3f094d1f5abfa09a08b67e9cb6b7376d..9aeb91935ce02387d8dae5e2f51bf1750f420a43 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -247,11 +247,11 @@ extern int force_personality32; /* * This is the base location for PIE (ET_DYN with INTERP) loads. On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ - 0x100000000UL) + (TASK_SIZE / 3 * 2)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index df002992d8fd3dffa9d9d0913e823ae52f9e2256..07b06955a05df90575d2d66ffec0e0b44793a587 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -25,6 +25,8 @@ BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR, smp_kvm_posted_intr_ipi) BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR, smp_kvm_posted_intr_wakeup_ipi) +BUILD_INTERRUPT3(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR, + smp_kvm_posted_intr_nested_ipi) #endif /* diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 9b76cd331990159dcdd042e10f90946f7440adba..ad1ed531febcbc3a15992be0e367f1cb4d43978e 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -15,6 +15,7 @@ typedef struct { #ifdef CONFIG_HAVE_KVM unsigned int kvm_posted_intr_ipis; unsigned int kvm_posted_intr_wakeup_ipis; + unsigned int kvm_posted_intr_nested_ipis; #endif unsigned int x86_platform_ipis; /* arch dependent */ unsigned int apic_perf_irqs; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index b90e1053049bdd17ad36989315db8ac1b3ccc973..d6dbafbd420737a5ea5363c700940acb22a8c358 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -30,6 +30,7 @@ extern asmlinkage void apic_timer_interrupt(void); extern asmlinkage void x86_platform_ipi(void); extern asmlinkage void kvm_posted_intr_ipi(void); extern asmlinkage void kvm_posted_intr_wakeup_ipi(void); +extern asmlinkage void kvm_posted_intr_nested_ipi(void); extern asmlinkage void error_interrupt(void); extern asmlinkage void irq_work_interrupt(void); @@ -62,6 +63,7 @@ extern void trace_call_function_single_interrupt(void); #define trace_reboot_interrupt reboot_interrupt #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi #define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi +#define trace_kvm_posted_intr_nested_ipi kvm_posted_intr_nested_ipi #endif /* CONFIG_TRACING */ #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 21126155a739f4a3495499f052abcd3eab5a0bb7..0ead9dbb91301d0f7f8923dcf33f25515bd182b8 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -43,6 +43,9 @@ struct hypervisor_x86 { /* pin current vcpu to specified physical cpu (run rarely) */ void (*pin_vcpu)(int); + + /* called during init_mem_mapping() to setup early mappings. 
*/ + void (*init_mem_mapping)(void); }; extern const struct hypervisor_x86 *x86_hyper; @@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm; extern void init_hypervisor_platform(void); extern bool hypervisor_x2apic_available(void); extern void hypervisor_pin_vcpu(int cpu); + +static inline void hypervisor_init_mem_mapping(void) +{ + if (x86_hyper && x86_hyper->init_mem_mapping) + x86_hyper->init_mem_mapping(); +} #else static inline void init_hypervisor_platform(void) { } static inline bool hypervisor_x2apic_available(void) { return false; } +static inline void hypervisor_init_mem_mapping(void) { } #endif /* CONFIG_HYPERVISOR_GUEST */ #endif /* _ASM_X86_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 6ca9fd6234e13b83de1bd495d0c4f7231ba439d4..aaf8d28b5d008969786ee60522d0d2f2a2fae126 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -83,7 +83,6 @@ */ #define X86_PLATFORM_IPI_VECTOR 0xf7 -#define POSTED_INTR_WAKEUP_VECTOR 0xf1 /* * IRQ work vector: */ @@ -98,6 +97,8 @@ /* Vector for KVM to deliver posted interrupt IPI */ #ifdef CONFIG_HAVE_KVM #define POSTED_INTR_VECTOR 0xf2 +#define POSTED_INTR_WAKEUP_VECTOR 0xf1 +#define POSTED_INTR_NESTED_VECTOR 0xf0 #endif /* diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index d869c8671e364d55e343322b410fec9a3e5f66c6..0ee83321a3136fcca7a00a3b7e6c375e7a51e13f 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -8,20 +8,25 @@ * This file is licensed under GPLv2. */ -#include +#include +#include #include #include #include struct aperfmperf_sample { unsigned int khz; - unsigned long jiffies; + ktime_t time; u64 aperf; u64 mperf; }; static DEFINE_PER_CPU(struct aperfmperf_sample, samples); +#define APERFMPERF_CACHE_THRESHOLD_MS 10 +#define APERFMPERF_REFRESH_DELAY_MS 20 +#define APERFMPERF_STALE_THRESHOLD_MS 1000 + /* * aperfmperf_snapshot_khz() * On the current CPU, snapshot APERF, MPERF, and jiffies @@ -33,13 +38,18 @@ static void aperfmperf_snapshot_khz(void *dummy) u64 aperf, aperf_delta; u64 mperf, mperf_delta; struct aperfmperf_sample *s = this_cpu_ptr(&samples); + ktime_t now = ktime_get(); + s64 time_delta = ktime_ms_delta(now, s->time); + unsigned long flags; - /* Don't bother re-computing within 10 ms */ - if (time_before(jiffies, s->jiffies + HZ/100)) + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) return; + local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + local_irq_restore(flags); aperf_delta = aperf - s->aperf; mperf_delta = mperf - s->mperf; @@ -51,22 +61,21 @@ static void aperfmperf_snapshot_khz(void *dummy) if (mperf_delta == 0) return; - /* - * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then - * khz = (cpu_khz * aperf_delta) / mperf_delta - */ - if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta) - s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); - else /* khz = aperf_delta / (mperf_delta / cpu_khz) */ - s->khz = div64_u64(aperf_delta, - div64_u64(mperf_delta, cpu_khz)); - s->jiffies = jiffies; + s->time = now; s->aperf = aperf; s->mperf = mperf; + + /* If the previous iteration was too long ago, discard it. 
*/ + if (time_delta > APERFMPERF_STALE_THRESHOLD_MS) + s->khz = 0; + else + s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); } unsigned int arch_freq_get_on_cpu(int cpu) { + unsigned int khz; + if (!cpu_khz) return 0; @@ -74,6 +83,12 @@ unsigned int arch_freq_get_on_cpu(int cpu) return 0; smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); + khz = per_cpu(samples.khz, cpu); + if (khz) + return khz; + + msleep(APERFMPERF_REFRESH_DELAY_MS); + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); return per_cpu(samples.khz, cpu); } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index d7cc190ae45719bf84d721e781dc7fde00c85e14..f7370abd33c67570ad713a03a20f9114e6ad6399 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -122,7 +122,7 @@ static struct attribute *thermal_throttle_attrs[] = { NULL }; -static struct attribute_group thermal_attr_group = { +static const struct attribute_group thermal_attr_group = { .attrs = thermal_throttle_attrs, .name = "thermal_throttle" }; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 9cb98ee103db1bf0cdc2349614fe0d4e59cfb21f..86e8f0b2537b3eaedd2da204d67c94947a1b16f1 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -561,7 +561,7 @@ static struct attribute *mc_default_attrs[] = { NULL }; -static struct attribute_group mc_attr_group = { +static const struct attribute_group mc_attr_group = { .attrs = mc_default_attrs, .name = "microcode", }; @@ -707,7 +707,7 @@ static struct attribute *cpu_root_microcode_attrs[] = { NULL }; -static struct attribute_group cpu_root_microcode_group = { +static const struct attribute_group cpu_root_microcode_group = { .name = "microcode", .attrs = cpu_root_microcode_attrs, }; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index c5bb63be4ba1e6a2df1203fe73079faad9fc62e7..40d5a8a752125ed5d26a7605d5eabad572879bfc 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -237,6 +237,18 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask); } +static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) +{ + struct set_mtrr_data data = { .smp_reg = reg, + .smp_base = base, + .smp_size = size, + .smp_type = type + }; + + stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask); +} + static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { @@ -370,7 +382,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, /* Search for an empty MTRR */ i = mtrr_if->get_free_region(base, size, replace); if (i >= 0) { - set_mtrr(i, base, size, type); + set_mtrr_cpuslocked(i, base, size, type); if (likely(replace < 0)) { mtrr_usage_table[i] = 1; } else { @@ -378,7 +390,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, if (increment) mtrr_usage_table[i]++; if (unlikely(replace != i)) { - set_mtrr(replace, 0, 0, 0); + set_mtrr_cpuslocked(replace, 0, 0, 0); mtrr_usage_table[replace] = 0; } } @@ -506,7 +518,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) goto out; } if (--mtrr_usage_table[reg] < 1) - set_mtrr(reg, 0, 0, 0); + set_mtrr_cpuslocked(reg, 0, 0, 0); error = reg; out: mutex_unlock(&mtrr_mutex); diff --git 
a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 46c3c73e7f43f5fbb56fe0028d8aafb64a3f8a9e..9ba79543d9ee9f1ee19bce38955467afa04b125f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -53,6 +53,7 @@ void __head __startup_64(unsigned long physaddr) pudval_t *pud; pmdval_t *pmd, pmd_entry; int i; + unsigned int *next_pgt_ptr; /* Is the address too large? */ if (physaddr >> MAX_PHYSMEM_BITS) @@ -91,9 +92,9 @@ void __head __startup_64(unsigned long physaddr) * creates a bunch of nonsense entries but that is fine -- * it avoids problems around wraparound. */ - - pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); - pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); + next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr); + pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr); + pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr); if (IS_ENABLED(CONFIG_X86_5LEVEL)) { p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 16f82a3aaec7c9c8d85e7c8b4f805da396981fce..8ce4212e2b8d0f139e543e05e7e284ee25ee856d 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -345,21 +345,10 @@ static int hpet_shutdown(struct clock_event_device *evt, int timer) return 0; } -static int hpet_resume(struct clock_event_device *evt, int timer) -{ - if (!timer) { - hpet_enable_legacy_int(); - } else { - struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); - - irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); - irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); - disable_hardirq(hdev->irq); - irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); - enable_irq(hdev->irq); - } +static int hpet_resume(struct clock_event_device *evt) +{ + hpet_enable_legacy_int(); hpet_print_config(); - return 0; } @@ -417,7 +406,7 @@ static int hpet_legacy_set_periodic(struct clock_event_device *evt) static int hpet_legacy_resume(struct clock_event_device *evt) { - return hpet_resume(evt, 0); + return hpet_resume(evt); } static int hpet_legacy_next_event(unsigned long delta, @@ -510,8 +499,14 @@ static int hpet_msi_set_periodic(struct clock_event_device *evt) static int hpet_msi_resume(struct clock_event_device *evt) { struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); + struct irq_data *data = irq_get_irq_data(hdev->irq); + struct msi_msg msg; - return hpet_resume(evt, hdev->num); + /* Restore the MSI msg and unmask the interrupt */ + irq_chip_compose_msi_msg(data, &msg); + hpet_msi_write(hdev, &msg); + hpet_msi_unmask(data); + return 0; } static int hpet_msi_next_event(unsigned long delta, diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 4aa03c5a14c905e9c1a1db283d61ac56372041c9..4ed0aba8dbc83b6ef83aed558ae4c459009d9146 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -155,6 +155,12 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis); seq_puts(p, " Posted-interrupt notification event\n"); + seq_printf(p, "%*s: ", prec, "NPI"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + irq_stats(j)->kvm_posted_intr_nested_ipis); + seq_puts(p, " Nested posted-interrupt event\n"); + seq_printf(p, "%*s: ", prec, "PIW"); for_each_online_cpu(j) seq_printf(p, "%10u ", @@ -313,6 +319,19 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs) exiting_irq(); set_irq_regs(old_regs); } + +/* + * Handler for POSTED_INTERRUPT_NESTED_VECTOR. 
+ */ +__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + entering_ack_irq(); + inc_irq_stat(kvm_posted_intr_nested_ipis); + exiting_irq(); + set_irq_regs(old_regs); +} #endif __visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs) diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 7468c69875477e876dafd5209f0cd83c6939bce9..c7fd18526c3e3087dee25af3ab0c3f3ff7dc616b 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -150,6 +150,8 @@ static void __init apic_intr_init(void) alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi); /* IPI for KVM to deliver interrupt to wake up tasks */ alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi); + /* IPI for KVM to deliver nested posted interrupt */ + alloc_intr_gate(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi); #endif /* IPI vectors for APIC spurious and error interrupts */ diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6b877807598b9028527a2357fef630431021c383..f0153714ddac6b2305645ef5c0e35fd3a6c9fb2c 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -457,6 +457,8 @@ static int arch_copy_kprobe(struct kprobe *p) int arch_prepare_kprobe(struct kprobe *p) { + int ret; + if (alternatives_text_reserved(p->addr, p->addr)) return -EINVAL; @@ -467,7 +469,13 @@ int arch_prepare_kprobe(struct kprobe *p) if (!p->ainsn.insn) return -ENOMEM; - return arch_copy_kprobe(p); + ret = arch_copy_kprobe(p); + if (ret) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } + + return ret; } void arch_arm_kprobe(struct kprobe *p) diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 4afc67f5facc497606e2fb898ba7b7d402f04caa..06e1ff5562c0b4a5a28f3c052ed74b545c7ccca0 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -55,7 +55,7 @@ static struct bin_attribute *boot_params_data_attrs[] = { NULL, }; -static struct attribute_group boot_params_attr_group = { +static const struct attribute_group boot_params_attr_group = { .attrs = boot_params_version_attrs, .bin_attrs = boot_params_data_attrs, }; @@ -202,7 +202,7 @@ static struct bin_attribute *setup_data_data_attrs[] = { NULL, }; -static struct attribute_group setup_data_attr_group = { +static const struct attribute_group setup_data_attr_group = { .attrs = setup_data_type_attrs, .bin_attrs = setup_data_data_attrs, }; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 71c17a5be983524ea277718b759ff1511a9a3214..d04e30e3c0ffd3f255f50e595c0ab0f919fddc70 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token) if (hlist_unhashed(&n.link)) break; + rcu_irq_exit(); + if (!n.halted) { local_irq_enable(); schedule(); @@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token) /* * We cannot reschedule. So halt. */ - rcu_irq_exit(); native_safe_halt(); local_irq_disable(); - rcu_irq_enter(); } + + rcu_irq_enter(); } if (!n.halted) finish_swait(&n.wq, &wait); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 67393fc883534f47a5d5c4d92478da9f0601ef53..a56bf6051f4e3707594b1b5ddab9711da9c27117 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -471,12 +471,12 @@ static int __init reboot_init(void) /* * The DMI quirks table takes precedence. If no quirks entry - * matches and the ACPI Hardware Reduced bit is set, force EFI - * reboot. 
+ * matches and the ACPI Hardware Reduced bit is set and EFI + * runtime services are enabled, force EFI reboot. */ rv = dmi_check_system(reboot_dmi_table); - if (!rv && efi_reboot_required()) + if (!rv && efi_reboot_required() && !efi_runtime_disabled()) reboot_type = BOOT_EFI; return 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b474c8de7fba09cfe27d1e0124f694d152654570..54b9e89d4d6be3844b8b3c310433467d0e5f1481 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -971,7 +971,8 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle) * Returns zero if CPU booted OK, else error code from * ->wakeup_secondary_cpu. */ -static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) +static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, + int *cpu0_nmi_registered) { volatile u32 *trampoline_status = (volatile u32 *) __va(real_mode_header->trampoline_status); @@ -979,7 +980,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) unsigned long start_ip = real_mode_header->trampoline_start; unsigned long boot_error = 0; - int cpu0_nmi_registered = 0; unsigned long timeout; idle->thread.sp = (unsigned long)task_pt_regs(idle); @@ -1035,7 +1035,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); else boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid, - &cpu0_nmi_registered); + cpu0_nmi_registered); if (!boot_error) { /* @@ -1080,12 +1080,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) */ smpboot_restore_warm_reset_vector(); } - /* - * Clean up the nmi handler. Do this after the callin and callout sync - * to avoid impact of possible long unregister time. - */ - if (cpu0_nmi_registered) - unregister_nmi_handler(NMI_LOCAL, "wake_cpu0"); return boot_error; } @@ -1093,8 +1087,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) int native_cpu_up(unsigned int cpu, struct task_struct *tidle) { int apicid = apic->cpu_present_to_apicid(cpu); + int cpu0_nmi_registered = 0; unsigned long flags; - int err; + int err, ret = 0; WARN_ON(irqs_disabled()); @@ -1131,10 +1126,11 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) common_cpu_up(cpu, tidle); - err = do_boot_cpu(apicid, cpu, tidle); + err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered); if (err) { pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); - return -EIO; + ret = -EIO; + goto unreg_nmi; } /* @@ -1150,7 +1146,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) touch_nmi_watchdog(); } - return 0; +unreg_nmi: + /* + * Clean up the nmi handler. Do this after the callin and callout sync + * to avoid impact of possible long unregister time. 
+ */ + if (cpu0_nmi_registered) + unregister_nmi_handler(NMI_LOCAL, "wake_cpu0"); + + return ret; } /** diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 2819d4c123eb89c0e00e9785239f19e2b79d6f73..589dcc117086ffdfda31920cfdddfc462bd468c9 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1495,11 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); static void cancel_hv_timer(struct kvm_lapic *apic) { + WARN_ON(preemptible()); WARN_ON(!apic->lapic_timer.hv_timer_in_use); - preempt_disable(); kvm_x86_ops->cancel_hv_timer(apic->vcpu); apic->lapic_timer.hv_timer_in_use = false; - preempt_enable(); } static bool start_hv_timer(struct kvm_lapic *apic) @@ -1507,6 +1506,7 @@ static bool start_hv_timer(struct kvm_lapic *apic) struct kvm_timer *ktimer = &apic->lapic_timer; int r; + WARN_ON(preemptible()); if (!kvm_x86_ops->set_hv_timer) return false; @@ -1538,6 +1538,8 @@ static bool start_hv_timer(struct kvm_lapic *apic) static void start_sw_timer(struct kvm_lapic *apic) { struct kvm_timer *ktimer = &apic->lapic_timer; + + WARN_ON(preemptible()); if (apic->lapic_timer.hv_timer_in_use) cancel_hv_timer(apic); if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) @@ -1552,15 +1554,20 @@ static void start_sw_timer(struct kvm_lapic *apic) static void restart_apic_timer(struct kvm_lapic *apic) { + preempt_disable(); if (!start_hv_timer(apic)) start_sw_timer(apic); + preempt_enable(); } void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - WARN_ON(!apic->lapic_timer.hv_timer_in_use); + preempt_disable(); + /* If the preempt notifier has already run, it also called apic_timer_expired */ + if (!apic->lapic_timer.hv_timer_in_use) + goto out; WARN_ON(swait_active(&vcpu->wq)); cancel_hv_timer(apic); apic_timer_expired(apic); @@ -1569,6 +1576,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) advance_periodic_target_expiration(apic); restart_apic_timer(apic); } +out: + preempt_enable(); } EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); @@ -1582,9 +1591,11 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; + preempt_disable(); /* Possibly the TSC deadline timer is not enabled yet */ if (apic->lapic_timer.hv_timer_in_use) start_sw_timer(apic); + preempt_enable(); } EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4d8141e533c369711df245d0a783683598ad4559..56ba05312759d3ed4568e546492d9d8bfad05b71 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1100,7 +1100,7 @@ static __init int svm_hardware_setup(void) if (vls) { if (!npt_enabled || - !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) || + !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || !IS_ENABLED(CONFIG_X86_64)) { vls = false; } else { @@ -2430,6 +2430,16 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = error_code; + + /* + * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception. + * The fix is to add the ancillary datum (CR2 or DR6) to structs + * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be + * written only when inject_pending_event runs (DR6 would written here + * too). 
This should be conditional on a new capability---if the + * capability is disabled, kvm_multiple_exception would write the + * ancillary information to CR2 or DR6, for backwards ABI-compatibility. + */ if (svm->vcpu.arch.exception.nested_apf) svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; else diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 29fd8af5c347a6c642e131458fcfa254b6d7d854..9b21b12230354e334900e6536b7612285f75b7e3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -416,13 +416,10 @@ struct nested_vmx { /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; - /* The host-usable pointer to the above */ - struct page *current_vmcs12_page; - struct vmcs12 *current_vmcs12; /* * Cache of the guest's VMCS, existing outside of guest memory. * Loaded from guest memory during VMPTRLD. Flushed to guest - * memory during VMXOFF, VMCLEAR, VMPTRLD. + * memory during VMCLEAR and VMPTRLD. */ struct vmcs12 *cached_vmcs12; /* @@ -563,7 +560,6 @@ struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; - bool nmi_known_unmasked; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; @@ -928,6 +924,10 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var); static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); static int alloc_identity_pagetable(struct kvm *kvm); +static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); +static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); +static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, + u16 error_code); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -2429,6 +2429,30 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) vmx_set_interrupt_shadow(vcpu, 0); } +static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, + unsigned long exit_qual) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned int nr = vcpu->arch.exception.nr; + u32 intr_info = nr | INTR_INFO_VALID_MASK; + + if (vcpu->arch.exception.has_error_code) { + vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; + intr_info |= INTR_INFO_DELIVER_CODE_MASK; + } + + if (kvm_exception_is_soft(nr)) + intr_info |= INTR_TYPE_SOFT_EXCEPTION; + else + intr_info |= INTR_TYPE_HARD_EXCEPTION; + + if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && + vmx_get_nmi_mask(vcpu)) + intr_info |= INTR_INFO_UNBLOCK_NMI; + + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); +} + /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. @@ -2438,23 +2462,38 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu) struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; - if (!((vmcs12->exception_bitmap & (1u << nr)) || - (nr == PF_VECTOR && vcpu->arch.exception.nested_apf))) - return 0; + if (nr == PF_VECTOR) { + if (vcpu->arch.exception.nested_apf) { + nested_vmx_inject_exception_vmexit(vcpu, + vcpu->arch.apf.nested_apf_token); + return 1; + } + /* + * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception. + * The fix is to add the ancillary datum (CR2 or DR6) to structs + * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 + * can be written only when inject_pending_event runs. 
This should be + * conditional on a new capability---if the capability is disabled, + * kvm_multiple_exception would write the ancillary information to + * CR2 or DR6, for backwards ABI-compatibility. + */ + if (nested_vmx_is_page_fault_vmexit(vmcs12, + vcpu->arch.exception.error_code)) { + nested_vmx_inject_exception_vmexit(vcpu, vcpu->arch.cr2); + return 1; + } + } else { + unsigned long exit_qual = 0; + if (nr == DB_VECTOR) + exit_qual = vcpu->arch.dr6; - if (vcpu->arch.exception.nested_apf) { - vmcs_write32(VM_EXIT_INTR_ERROR_CODE, vcpu->arch.exception.error_code); - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, - PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | - INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, - vcpu->arch.apf.nested_apf_token); - return 1; + if (vmcs12->exception_bitmap & (1u << nr)) { + nested_vmx_inject_exception_vmexit(vcpu, exit_qual); + return 1; + } } - nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, - vmcs_read32(VM_EXIT_INTR_INFO), - vmcs_readl(EXIT_QUALIFICATION)); - return 1; + return 0; } static void vmx_queue_exception(struct kvm_vcpu *vcpu) @@ -2668,7 +2707,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and - * nested_vmx_exit_handled() will not pass related exits to L1. + * nested_vmx_exit_reflected() will not pass related exits to L1. * These rules have exceptions below. */ @@ -4956,6 +4995,28 @@ static bool vmx_get_enable_apicv(void) return enable_apicv; } +static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + gfn_t gfn; + + /* + * Don't need to mark the APIC access page dirty; it is never + * written to by the CPU during APIC virtualization. + */ + + if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { + gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + } + + if (nested_cpu_has_posted_intr(vmcs12)) { + gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + } +} + + static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -4963,18 +5024,15 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) void *vapic_page; u16 status; - if (vmx->nested.pi_desc && - vmx->nested.pi_pending) { - vmx->nested.pi_pending = false; - if (!pi_test_and_clear_on(vmx->nested.pi_desc)) - return; - - max_irr = find_last_bit( - (unsigned long *)vmx->nested.pi_desc->pir, 256); + if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) + return; - if (max_irr == 256) - return; + vmx->nested.pi_pending = false; + if (!pi_test_and_clear_on(vmx->nested.pi_desc)) + return; + max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); + if (max_irr != 256) { vapic_page = kmap(vmx->nested.virtual_apic_page); __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); kunmap(vmx->nested.virtual_apic_page); @@ -4986,11 +5044,16 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) vmcs_write16(GUEST_INTR_STATUS, status); } } + + nested_mark_vmcs12_pages_dirty(vcpu); } -static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, + bool nested) { #ifdef CONFIG_SMP + int pi_vec = nested ? 
POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; + if (vcpu->mode == IN_GUEST_MODE) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -5008,8 +5071,7 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) */ WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); - apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), - POSTED_INTR_VECTOR); + apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); return true; } #endif @@ -5024,7 +5086,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu); + kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. @@ -5058,7 +5120,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) if (pi_test_and_set_on(&vmx->pi_desc)) return; - if (!kvm_vcpu_trigger_posted_interrupt(vcpu)) + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); } @@ -7133,34 +7195,32 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) return 1; } +static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) +{ + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); + vmcs_write64(VMCS_LINK_POINTER, -1ull); +} + static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { if (vmx->nested.current_vmptr == -1ull) return; - /* current_vmptr and current_vmcs12 are always set/reset together */ - if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) - return; - if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; - vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_SHADOW_VMCS); - vmcs_write64(VMCS_LINK_POINTER, -1ull); + vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; /* Flush VMCS12 to guest memory */ - memcpy(vmx->nested.current_vmcs12, vmx->nested.cached_vmcs12, - VMCS12_SIZE); + kvm_vcpu_write_guest_page(&vmx->vcpu, + vmx->nested.current_vmptr >> PAGE_SHIFT, + vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); - kunmap(vmx->nested.current_vmcs12_page); - nested_release_page(vmx->nested.current_vmcs12_page); vmx->nested.current_vmptr = -1ull; - vmx->nested.current_vmcs12 = NULL; } /* @@ -7174,12 +7234,14 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.vmxon = false; free_vpid(vmx->nested.vpid02); - nested_release_vmcs12(vmx); + vmx->nested.posted_intr_nv = -1; + vmx->nested.current_vmptr = -1ull; if (vmx->nested.msr_bitmap) { free_page((unsigned long)vmx->nested.msr_bitmap); vmx->nested.msr_bitmap = NULL; } if (enable_shadow_vmcs) { + vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); free_vmcs(vmx->vmcs01.shadow_vmcs); vmx->vmcs01.shadow_vmcs = NULL; @@ -7578,14 +7640,14 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) } nested_release_vmcs12(vmx); - vmx->nested.current_vmcs12 = new_vmcs12; - vmx->nested.current_vmcs12_page = page; /* * Load VMCS12 from guest memory since it is not already * cached. */ - memcpy(vmx->nested.cached_vmcs12, - vmx->nested.current_vmcs12, VMCS12_SIZE); + memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); + kunmap(page); + nested_release_page_clean(page); + set_current_vmptr(vmx, vmptr); } @@ -8018,12 +8080,11 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). 
*/ -static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) +static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - u32 exit_reason = vmx->exit_reason; trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), @@ -8032,6 +8093,18 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); + /* + * The host physical addresses of some pages of guest memory + * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU + * may write to these pages via their host physical address while + * L2 is running, bypassing any address-translation-based dirty + * tracking (e.g. EPT write protection). + * + * Mark them dirty on every exit from L2 to prevent them from + * getting out of sync with dirty tracking. + */ + nested_mark_vmcs12_pages_dirty(vcpu); + if (vmx->nested.nested_run_pending) return false; @@ -8168,6 +8241,29 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) } } +static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason) +{ + u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + + /* + * At this point, the exit interruption info in exit_intr_info + * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT + * we need to query the in-kernel LAPIC. + */ + WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT); + if ((exit_intr_info & + (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == + (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + vmcs12->vm_exit_intr_error_code = + vmcs_read32(VM_EXIT_INTR_ERROR_CODE); + } + + nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, + vmcs_readl(EXIT_QUALIFICATION)); + return 1; +} + static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); @@ -8414,12 +8510,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) if (vmx->emulation_required) return handle_invalid_guest_state(vcpu); - if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { - nested_vmx_vmexit(vcpu, exit_reason, - vmcs_read32(VM_EXIT_INTR_INFO), - vmcs_readl(EXIT_QUALIFICATION)); - return 1; - } + if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason)) + return nested_vmx_reflect_vmexit(vcpu, exit_reason); if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { dump_vmcs(); @@ -9222,7 +9314,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; - vmx->nested.current_vmcs12 = NULL; vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; @@ -9508,12 +9599,15 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, WARN_ON(!is_guest_mode(vcpu)); - if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, - vmcs_read32(VM_EXIT_INTR_INFO), - vmcs_readl(EXIT_QUALIFICATION)); - else + if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { + vmcs12->vm_exit_intr_error_code = fault->error_code; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, + PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | + INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, + fault->address); + } else { kvm_inject_page_fault(vcpu, fault); + } } static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, @@ -10041,6 +10135,8 @@ 
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); } else { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } @@ -10065,13 +10161,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { - /* - * Note that we use L0's vector here and in - * vmx_deliver_nested_posted_interrupt. - */ vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); } else { exec_control &= ~PIN_BASED_POSTED_INTR; } @@ -10095,12 +10187,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. - * - * A problem with this approach (when !enable_ept) is that L1 may be - * injected with more page faults than it asked for. This could have - * caused problems, but in practice existing hypervisors don't care. - * To fix this, we will need to emulate the PFEC checking (on the L1 - * page tables), using walk_addr(), when injecting PFs to L1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); @@ -10848,13 +10934,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; - vmcs12->vm_exit_intr_info = exit_intr_info; - if ((vmcs12->vm_exit_intr_info & - (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == - (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) - vmcs12->vm_exit_intr_error_code = - vmcs_read32(VM_EXIT_INTR_ERROR_CODE); + vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); @@ -10942,7 +11023,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, */ vmx_flush_tlb(vcpu); } - + /* Restore posted intr vector. */ + if (nested_cpu_has_posted_intr(vmcs12)) + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); @@ -11048,8 +11131,15 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmx_switch_vmcs(vcpu, &vmx->vmcs01); - if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) - && nested_exit_intr_ack_set(vcpu)) { + /* + * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of + * the VM-exit interrupt information (valid interrupt) is always set to + * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need + * kvm_cpu_has_interrupt(). See the commit message for details. 
+ */ + if (nested_exit_intr_ack_set(vcpu) && + exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + kvm_cpu_has_interrupt(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 82a63c59f77b52fb7a9fa6ce49513c77ce7c21fa..d734aa8c5b4f7290e365badd00ea962fd0af9acd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -597,8 +597,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) (unsigned long *)&vcpu->arch.regs_avail)) return true; - gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT; - offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1); + gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT; + offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) @@ -3159,15 +3159,18 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, kvm_set_hflags(vcpu, hflags); vcpu->arch.smi_pending = events->smi.pending; - if (events->smi.smm_inside_nmi) - vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; - else - vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; - if (lapic_in_kernel(vcpu)) { - if (events->smi.latched_init) - set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + + if (events->smi.smm) { + if (events->smi.smm_inside_nmi) + vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else - clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; + if (lapic_in_kernel(vcpu)) { + if (events->smi.latched_init) + set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + else + clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + } } } @@ -6215,6 +6218,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) lapic_irq.shorthand = 0; lapic_irq.dest_mode = 0; + lapic_irq.level = 0; lapic_irq.dest_id = apicid; lapic_irq.msi_redir_hint = false; diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 673541eb3b3f16c8c029349d597d67f4bb83a77a..bf3f1065d6addb88b898ba3a86089cccff6ed15e 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -18,6 +18,7 @@ #include /* for MAX_DMA_PFN */ #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -636,6 +637,8 @@ void __init init_mem_mapping(void) load_cr3(swapper_pg_dir); __flush_tlb_all(); + hypervisor_init_mem_mapping(); + early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 229d04a83f8561ec08e394f57d8cb5d671bd56ff..a88cfbfbd0781a4d20c398d7ca1b30f50f05be43 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void) static unsigned long stack_maxrandom_size(unsigned long task_size) { unsigned long max = 0; - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) { + if (current->flags & PF_RANDOMIZE) { max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit()); max <<= PAGE_SHIFT; } @@ -79,13 +78,13 @@ static int mmap_is_legacy(void) static unsigned long arch_rnd(unsigned int rndbits) { + if (!(current->flags & PF_RANDOMIZE)) + return 0; return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT; } unsigned long arch_mmap_rnd(void) { - if (!(current->flags & PF_RANDOMIZE)) - return 0; return arch_rnd(mmap_is_ia32() ? 
mmap32_rnd_bits : mmap64_rnd_bits); } diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3e4bdb442fbcfc783059be96efe4d4e2d6b58b47..f44c0bc95aa2f45ad42462a5f23f4db4672d1257 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -26,7 +26,7 @@ static struct bau_operations ops __ro_after_init; /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */ -static int timeout_base_ns[] = { +static const int timeout_base_ns[] = { 20, 160, 1280, @@ -1216,7 +1216,7 @@ static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg, * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register. * Such a message must be ignored. */ -void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) +static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) { unsigned long mmr_image; unsigned char swack_vec; diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 87d791356ea9052f79f23e00f5d0024ff8cf0b7f..de503c225ae1f194b10c71b44528ad2a2a7a4c0d 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -21,38 +22,50 @@ #include "mmu.h" #include "smp.h" -void __ref xen_hvm_init_shared_info(void) +static unsigned long shared_info_pfn; + +void xen_hvm_init_shared_info(void) { struct xen_add_to_physmap xatp; - u64 pa; - - if (HYPERVISOR_shared_info == &xen_dummy_shared_info) { - /* - * Search for a free page starting at 4kB physical address. - * Low memory is preferred to avoid an EPT large page split up - * by the mapping. - * Starting below X86_RESERVE_LOW (usually 64kB) is fine as - * the BIOS used for HVM guests is well behaved and won't - * clobber memory other than the first 4kB. - */ - for (pa = PAGE_SIZE; - !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) || - memblock_is_reserved(pa); - pa += PAGE_SIZE) - ; - - memblock_reserve(pa, PAGE_SIZE); - HYPERVISOR_shared_info = __va(pa); - } xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; - xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info); + xatp.gpfn = shared_info_pfn; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } +static void __init reserve_shared_info(void) +{ + u64 pa; + + /* + * Search for a free page starting at 4kB physical address. + * Low memory is preferred to avoid an EPT large page split up + * by the mapping. + * Starting below X86_RESERVE_LOW (usually 64kB) is fine as + * the BIOS used for HVM guests is well behaved and won't + * clobber memory other than the first 4kB. 
+ */ + for (pa = PAGE_SIZE; + !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) || + memblock_is_reserved(pa); + pa += PAGE_SIZE) + ; + + shared_info_pfn = PHYS_PFN(pa); + + memblock_reserve(pa, PAGE_SIZE); + HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE); +} + +static void __init xen_hvm_init_mem_mapping(void) +{ + early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); + HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); +} + static void __init init_hvm_pv_info(void) { int major, minor; @@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void) init_hvm_pv_info(); + reserve_shared_info(); xen_hvm_init_shared_info(); /* @@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = { .init_platform = xen_hvm_guest_init, .pin_vcpu = xen_pin_vcpu, .x2apic_available = xen_x2apic_para_available, + .init_mem_mapping = xen_hvm_init_mem_mapping, }; EXPORT_SYMBOL(x86_hyper_xen_hvm); diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 2d716ebc5a5e90d62c3759e01a512e0ff6ab9de4..dff7cc39437caba214fac506f57009837f2223c8 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -1,5 +1,6 @@ generic-y += bug.h generic-y += clkdev.h +generic-y += device.h generic-y += div64.h generic-y += dma-contiguous.h generic-y += emergency-restart.h @@ -17,6 +18,7 @@ generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h +generic-y += param.h generic-y += percpu.h generic-y += preempt.h generic-y += rwsem.h diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h deleted file mode 100644 index 1deeb8ebbb1bc30f236e3592d9920c678ba533ba..0000000000000000000000000000000000000000 --- a/arch/xtensa/include/asm/device.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#ifndef _ASM_XTENSA_DEVICE_H -#define _ASM_XTENSA_DEVICE_H - -struct dev_archdata { -}; - -struct pdev_archdata { -}; - -#endif /* _ASM_XTENSA_DEVICE_H */ diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h deleted file mode 100644 index 0a70e780ef2a33f3466fe9201897578d2771ba70..0000000000000000000000000000000000000000 --- a/arch/xtensa/include/asm/param.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * include/asm-xtensa/param.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. 
- */ -#ifndef _XTENSA_PARAM_H -#define _XTENSA_PARAM_H - -#include - -# define HZ CONFIG_HZ /* internal timer frequency */ -# define USER_HZ 100 /* for user interfaces in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */ -#endif /* _XTENSA_PARAM_H */ diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index d159e9b9c01837ba5aa9e77d50c3c14e475ce367..672391003e40fac4f814fe5b6aa1dc04e791c81b 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) } EXPORT_SYMBOL(__sync_fetch_and_or_4); -#ifdef CONFIG_NET /* * Networking support */ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); -#endif /* CONFIG_NET */ /* * Architecture-specific symbols diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 1a804a2f9a5be6212c57febc01f6d28f47b8c91a..3c75c4e597da8f086f65de51201e0d37d6672733 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr) clear_page_alias(kvaddr, paddr); preempt_enable(); } +EXPORT_SYMBOL(clear_user_highpage); void copy_user_highpage(struct page *dst, struct page *src, unsigned long vaddr, struct vm_area_struct *vma) @@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src, copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); preempt_enable(); } - -#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ - -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +EXPORT_SYMBOL(copy_user_highpage); /* * Any time the kernel writes to a user page cache page, or it is about to @@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page) /* There shouldn't be an entry in the cache for this page anymore. */ } - +EXPORT_SYMBOL(flush_dcache_page); /* * For now, flush the whole cache. FIXME?? @@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma, __flush_invalidate_dcache_all(); __invalidate_icache_all(); } +EXPORT_SYMBOL(local_flush_cache_range); /* * Remove any entry in the cache for this page. @@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, __flush_invalidate_dcache_page_alias(virt, phys); __invalidate_icache_page_alias(virt, phys); } +EXPORT_SYMBOL(local_flush_cache_page); -#endif +#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ void update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) @@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) flush_tlb_page(vma, addr); -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +#if (DCACHE_WAY_SIZE > PAGE_SIZE) if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { unsigned long phys = page_to_phys(page); @@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) * flush_dcache_page() on the page. */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +#if (DCACHE_WAY_SIZE > PAGE_SIZE) void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 63e771ab56d80ade8cd5b5e8ccd86f2fac5f0573..859f0a8c97c8a1bf58b6ed4dc016c0ce0fb355ad 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -71,17 +71,29 @@ struct bfq_service_tree { * * bfq_sched_data is the basic scheduler queue. 
It supports three * ioprio_classes, and can be used either as a toplevel queue or as an - * intermediate queue on a hierarchical setup. @next_in_service - * points to the active entity of the sched_data service trees that - * will be scheduled next. It is used to reduce the number of steps - * needed for each hierarchical-schedule update. + * intermediate queue in a hierarchical setup. * * The supported ioprio_classes are the same as in CFQ, in descending * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. * Requests from higher priority queues are served before all the * requests from lower priority queues; among requests of the same * queue requests are served according to B-WF2Q+. - * All the fields are protected by the queue lock of the containing bfqd. + * + * The schedule is implemented by the service trees, plus the field + * @next_in_service, which points to the entity on the active trees + * that will be served next, if 1) no changes in the schedule occurs + * before the current in-service entity is expired, 2) the in-service + * queue becomes idle when it expires, and 3) if the entity pointed by + * in_service_entity is not a queue, then the in-service child entity + * of the entity pointed by in_service_entity becomes idle on + * expiration. This peculiar definition allows for the following + * optimization, not yet exploited: while a given entity is still in + * service, we already know which is the best candidate for next + * service among the other active entitities in the same parent + * entity. We can then quickly compare the timestamps of the + * in-service entity with those of such best candidate. + * + * All fields are protected by the lock of the containing bfqd. */ struct bfq_sched_data { /* entity in service */ diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 979f8f21b7e2b17268b2db3c9510c333fccbe10f..911aa7431dbeb08d6db591c1a0f577bd3af56e22 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) /* * This function tells whether entity stops being a candidate for next - * service, according to the following logic. + * service, according to the restrictive definition of the field + * next_in_service. In particular, this function is invoked for an + * entity that is about to be set in service. * - * This function is invoked for an entity that is about to be set in - * service. If such an entity is a queue, then the entity is no longer - * a candidate for next service (i.e, a candidate entity to serve - * after the in-service entity is expired). The function then returns - * true. + * If entity is a queue, then the entity is no longer a candidate for + * next service according to the that definition, because entity is + * about to become the in-service queue. This function then returns + * true if entity is a queue. * - * In contrast, the entity could stil be a candidate for next service - * if it is not a queue, and has more than one child. In fact, even if - * one of its children is about to be set in service, other children - * may still be the next to serve. As a consequence, a non-queue - * entity is not a candidate for next-service only if it has only one - * child. And only if this condition holds, then the function returns - * true for a non-queue entity. + * In contrast, entity could still be a candidate for next service if + * it is not a queue, and has more than one active child. 
In fact, + * even if one of its children is about to be set in service, other + * active children may still be the next to serve, for the parent + * entity, even according to the above definition. As a consequence, a + * non-queue entity is not a candidate for next-service only if it has + * only one active child. And only if this condition holds, then this + * function returns true for a non-queue entity. */ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) { @@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) bfqg = container_of(entity, struct bfq_group, entity); + /* + * The field active_entities does not always contain the + * actual number of active children entities: it happens to + * not account for the in-service entity in case the latter is + * removed from its active tree (which may get done after + * invoking the function bfq_no_longer_next_in_service in + * bfq_get_next_queue). Fortunately, here, i.e., while + * bfq_no_longer_next_in_service is not yet completed in + * bfq_get_next_queue, bfq_active_extract has not yet been + * invoked, and thus active_entities still coincides with the + * actual number of active entities. + */ if (bfqg->active_entities == 1) return true; @@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, * one of its children receives a new request. * * Basically, this function updates the timestamps of entity and - * inserts entity into its active tree, ater possible extracting it + * inserts entity into its active tree, ater possibly extracting it * from its idle tree. */ static void __bfq_activate_entity(struct bfq_entity *entity, @@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) entity->start = entity->finish; /* * In addition, if the entity had more than one child - * when set in service, then was not extracted from + * when set in service, then it was not extracted from * the active tree. This implies that the position of * the entity in the active tree may need to be * changed now, because we have just updated the start @@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) * time in a moment (the requeueing is then, more * precisely, a repositioning in this case). To * implement this repositioning, we: 1) dequeue the - * entity here, 2) update the finish time and - * requeue the entity according to the new - * timestamps below. + * entity here, 2) update the finish time and requeue + * the entity according to the new timestamps below. */ if (entity->tree) bfq_active_extract(st, entity); @@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity, /** - * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, - * and activate, requeue or reposition all ancestors - * for which such an update becomes necessary. + * bfq_activate_requeue_entity - activate or requeue an entity representing a + * bfq_queue, and activate, requeue or reposition + * all ancestors for which such an update becomes + * necessary. * @entity: the entity to activate. * @non_blocking_wait_rq: true if this entity was waiting for a request * @requeue: true if this is a requeue, which implies that bfqq is @@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, * @ins_into_idle_tree: if false, the entity will not be put into the * idle tree. * - * Deactivates an entity, independently from its previous state. 
Must + * Deactivates an entity, independently of its previous state. Must * be invoked only if entity is on a service tree. Extracts the entity - * from that tree, and if necessary and allowed, puts it on the idle + * from that tree, and if necessary and allowed, puts it into the idle * tree. */ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) @@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) st = bfq_entity_service_tree(entity); is_in_service = entity == sd->in_service_entity; - if (is_in_service) + if (is_in_service) { bfq_calc_finish(entity, entity->service); + sd->in_service_entity = NULL; + } if (entity->tree == &st->active) bfq_active_extract(st, entity); @@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) /** * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. * @entity: the entity to deactivate. - * @ins_into_idle_tree: true if the entity can be put on the idle tree + * @ins_into_idle_tree: true if the entity can be put into the idle tree */ static void bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree, @@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, */ bfq_update_next_in_service(sd, NULL); - if (sd->next_in_service) + if (sd->next_in_service || sd->in_service_entity) { /* - * The parent entity is still backlogged, - * because next_in_service is not NULL. So, no - * further upwards deactivation must be - * performed. Yet, next_in_service has - * changed. Then the schedule does need to be - * updated upwards. + * The parent entity is still active, because + * either next_in_service or in_service_entity + * is not NULL. So, no further upwards + * deactivation must be performed. Yet, + * next_in_service has changed. Then the + * schedule does need to be updated upwards. + * + * NOTE If in_service_entity is not NULL, then + * next_in_service may happen to be NULL, + * although the parent entity is evidently + * active. This happens if 1) the entity + * pointed by in_service_entity is the only + * active entity in the parent entity, and 2) + * according to the definition of + * next_in_service, the in_service_entity + * cannot be considered as + * next_in_service. See the comments on the + * definition of next_in_service for details. */ break; + } /* * If we get here, then the parent is no more @@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) /* * If entity is no longer a candidate for next - * service, then we extract it from its active tree, - * for the following reason. To further boost the - * throughput in some special case, BFQ needs to know - * which is the next candidate entity to serve, while - * there is already an entity in service. In this - * respect, to make it easy to compute/update the next - * candidate entity to serve after the current - * candidate has been set in service, there is a case - * where it is necessary to extract the current - * candidate from its service tree. Such a case is - * when the entity just set in service cannot be also - * a candidate for next service. Details about when - * this conditions holds are reported in the comments - * on the function bfq_no_longer_next_in_service() - * invoked below. + * service, then it must be extracted from its active + * tree, so as to make sure that it won't be + * considered when computing next_in_service. 
See the + * comments on the function + * bfq_no_longer_next_in_service() for details. */ if (bfq_no_longer_next_in_service(entity)) bfq_active_extract(bfq_entity_service_tree(entity), entity); /* - * For the same reason why we may have just extracted - * entity from its active tree, we may need to update - * next_in_service for the sched_data of entity too, - * regardless of whether entity has been extracted. - * In fact, even if entity has not been extracted, a - * descendant entity may get extracted. Such an event - * would cause a change in next_in_service for the - * level of the descendant entity, and thus possibly - * back to upper levels. + * Even if entity is not to be extracted according to + * the above check, a descendant entity may get + * extracted in one of the next iterations of this + * loop. Such an event could cause a change in + * next_in_service for the level of the descendant + * entity, and thus possibly back to this level. * - * We cannot perform the resulting needed update - * before the end of this loop, because, to know which - * is the correct next-to-serve candidate entity for - * each level, we need first to find the leaf entity - * to set in service. In fact, only after we know - * which is the next-to-serve leaf entity, we can - * discover whether the parent entity of the leaf - * entity becomes the next-to-serve, and so on. + * However, we cannot perform the resulting needed + * update of next_in_service for this level before the + * end of the whole loop, because, to know which is + * the correct next-to-serve candidate entity for each + * level, we need first to find the leaf entity to set + * in service. In fact, only after we know which is + * the next-to-serve leaf entity, we can discover + * whether the parent entity of the leaf entity + * becomes the next-to-serve, and so on. */ - } bfqq = bfq_entity_to_bfqq(entity); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 83e92beb3c9feb25f3be917e2b675f90b1544b5f..9b1ea478577b033195f34b457c85ad16eb759c6a 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work) */ bool __bio_integrity_endio(struct bio *bio) { - if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) { - struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct bio_integrity_payload *bip = bio_integrity(bio); + if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && + (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) { INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bip->bip_work); return false; diff --git a/block/blk-core.c b/block/blk-core.c index 970b9c9638c55f4852019f452f64d2a4c66c1a9c..dbecbf4a64e05b764a1ba0a3c4dfe8f01bc7aac1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3421,6 +3421,10 @@ EXPORT_SYMBOL(blk_finish_plug); */ void blk_pm_runtime_init(struct request_queue *q, struct device *dev) { + /* not support for RQF_PM and ->rpm_status in blk-mq yet */ + if (q->mq_ops) + return; + q->dev = dev; q->rpm_status = RPM_ACTIVE; pm_runtime_set_autosuspend_delay(q->dev, -1); diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 4891f042a22ff7c7a27abb0f71a6c65fa32ffd51..9f8cffc8a701ec24c87729b9ef4a1048cc7d205f 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -17,9 +17,9 @@ static int cpu_to_queue_index(unsigned int nr_queues, const int cpu) { /* - * Non online CPU will be mapped to queue index 0. 
+ * Non present CPU will be mapped to queue index 0. */ - if (!cpu_online(cpu)) + if (!cpu_present(cpu)) return 0; return cpu % nr_queues; } diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c index 0c3354cf3552877d7542cdb32f0ce4102a79fcce..76944e3271bf34a730e62fa0b27266abce86942e 100644 --- a/block/blk-mq-pci.c +++ b/block/blk-mq-pci.c @@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev) for (queue = 0; queue < set->nr_hw_queues; queue++) { mask = pci_irq_get_affinity(pdev, queue); if (!mask) - return -EINVAL; + goto fallback; for_each_cpu(cpu, mask) set->mq_map[cpu] = queue; } return 0; + +fallback: + WARN_ON_ONCE(set->nr_hw_queues > 1); + for_each_possible_cpu(cpu) + set->mq_map[cpu] = 0; + return 0; } EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index 041f7b7fa0d6def444e9349b6cf748afc8e89b2d..4603b115e234887860cbb28520c36ba9e5e7efd8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q, struct elevator_queue *e = q->elevator; struct request *rq; unsigned int tag; + struct blk_mq_ctx *local_ctx = NULL; blk_queue_enter_live(q); data->q = q; if (likely(!data->ctx)) - data->ctx = blk_mq_get_ctx(q); + data->ctx = local_ctx = blk_mq_get_ctx(q); if (likely(!data->hctx)) data->hctx = blk_mq_map_queue(q, data->ctx->cpu); if (op & REQ_NOWAIT) @@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q, tag = blk_mq_get_tag(data); if (tag == BLK_MQ_TAG_FAIL) { + if (local_ctx) { + blk_mq_put_ctx(local_ctx); + data->ctx = NULL; + } blk_queue_exit(q); return NULL; } @@ -355,13 +360,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, return ERR_PTR(ret); rq = blk_mq_get_request(q, NULL, op, &alloc_data); - - blk_mq_put_ctx(alloc_data.ctx); blk_queue_exit(q); if (!rq) return ERR_PTR(-EWOULDBLOCK); + blk_mq_put_ctx(alloc_data.ctx); + rq->__data_len = 0; rq->__sector = (sector_t) -1; rq->bio = rq->biotail = NULL; @@ -406,7 +411,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, alloc_data.ctx = __blk_mq_get_ctx(q, cpu); rq = blk_mq_get_request(q, NULL, op, &alloc_data); - blk_queue_exit(q); if (!rq) @@ -679,8 +683,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs) { - kblockd_schedule_delayed_work(&q->requeue_work, - msecs_to_jiffies(msecs)); + kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, + msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 6f8f6b86bfe29e0cf0c8fbf2cbe6ab26b705bc9a..0cf5fefdb859b1158460faa278017a524f54a93a 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, u8 *ihash = ohash + crypto_ahash_digestsize(auth); u32 tmp[2]; + if (!authsize) + goto decrypt; + /* Move high-order bits of sequence number back. 
*/ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); @@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, if (crypto_memneq(ihash, ohash, authsize)) return -EBADMSG; +decrypt: + sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index fc6c416f8724670aef2f29d4d81d373ed400dcd5..d5999eb41c00176e840d90b84241ab855c8881c4 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, { "CAV900D", APD_ADDR(vulcan_spi_desc) }, - { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, - { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, + { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index e51a1e98e62f4f5ab5f2132184e9686ab1a1f3a9..f88caf5aab76217d73feb277b1918e399cc61beb 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = { }; struct lpss_private_data { + struct acpi_device *adev; void __iomem *mmio_base; resource_size_t mmio_size; unsigned int fixed_clk_rate; @@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = { static void byt_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + if (!acpi_dev_present("INT33FD", NULL, -1)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } @@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = { static void bsw_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } @@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } + pdata->adev = adev; pdata->dev_desc = dev_desc; if (dev_desc->setup) diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 8c4e0a18460a78df600067da69537e6256d6bc1c..bf22c29d25179e937523c026c8028f644f3379f6 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void) found = false; resource_list_for_each_entry(rentry, &resource_list) { - if (resource_contains(rentry->res, &res)) { + if (rentry->res->flags == res.flags && + resource_overlaps(rentry->res, &res)) { + if (res.start < rentry->res->start) + rentry->res->start = res.start; + if (res.end > rentry->res->end) + rentry->res->end = res.end; found = true; break; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ddb01e9fa5b225eda5a83cf8a06501ddaded2fc5..62068a5e814f026a44e212bd2b93941590a3fe53 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false; module_param(ec_freeze_events, bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); +static bool ec_no_wakeup __read_mostly; +module_param(ec_no_wakeup, bool, 0644); +MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from 
suspend-to-idle"); + struct acpi_ec_query_handler { struct list_head node; acpi_ec_query_func func; @@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_unlock_irqrestore(&ec->lock, flags); __acpi_ec_flush_event(ec); } + +void acpi_ec_flush_work(void) +{ + if (first_ec) + __acpi_ec_flush_event(first_ec); + + flush_scheduled_work(); +} #endif /* CONFIG_PM_SLEEP */ static bool acpi_ec_guard_event(struct acpi_ec *ec) @@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev) return 0; } +static int acpi_ec_suspend_noirq(struct device *dev) +{ + struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + + /* + * The SCI handler doesn't run at this point, so the GPE can be + * masked at the low level without side effects. + */ + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && + ec->reference_count >= 1) + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); + + return 0; +} + +static int acpi_ec_resume_noirq(struct device *dev) +{ + struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && + ec->reference_count >= 1) + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); + + return 0; +} + static int acpi_ec_resume(struct device *dev) { struct acpi_ec *ec = @@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev) #endif static const struct dev_pm_ops acpi_ec_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 9531d3276f652a14e5eb39d9e0cb8a0325353f72..58dd7ab3c6538bd490b45e2e420c3d822ecf8c3e 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, void *data); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); +#ifdef CONFIG_PM_SLEEP +void acpi_ec_flush_work(void); +#endif + /*-------------------------------------------------------------------------- Suspend/Resume diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index edb0c79f7c64eea8b4d3cdde4467a356c20a32f8..917f1cc0fda4c2f6c8cc64f7b90dc252a264ac7a 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -443,7 +443,7 @@ int __init acpi_numa_init(void) * So go over all cpu entries in SRAT to get apicid to node mapping. */ - /* SRAT: Static Resource Affinity Table */ + /* SRAT: System Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { struct acpi_subtable_proc srat_proc[3]; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index be17664736b2f12388148580ccb0b2f0450c348e..fa8243c5c062c31b74847a0eb7c87e1f2d30e240 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -777,11 +777,11 @@ static void acpi_freeze_sync(void) /* * Process all pending events in case there are any wakeup ones. * - * The EC driver uses the system workqueue, so that one needs to be - * flushed too. + * The EC driver uses the system workqueue and an additional special + * one, so those need to be flushed too. 
*/ + acpi_ec_flush_work(); acpi_os_wait_events_complete(); - flush_scheduled_work(); s2idle_wakeup = false; } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 4ac3e06b41d846440d35079dfa54885111eec0df..98aa8c808a3346dc559e2f419ae4378dec075ffc 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -16,6 +16,16 @@ #include #include +/* + * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as + * occasionally getting stuck as 1. To avoid the potential for a hang, check + * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART + * implementations, so only do so if an affected platform is detected in + * parse_spcr(). + */ +bool qdf2400_e44_present; +EXPORT_SYMBOL(qdf2400_e44_present); + /* * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. * Detect them by examining the OEM fields in the SPCR header, similiar to PCI @@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon) goto done; } - if (qdf2400_erratum_44_present(&table->header)) - uart = "qdf2400_e44"; + /* + * If the E44 erratum is required, then we need to tell the pl011 + * driver to implement the work-around. + * + * The global variable is used by the probe function when it + * creates the UARTs, whether or not they're used as a console. + * + * If the user specifies "traditional" earlycon, the qdf2400_e44 + * console name matches the EARLYCON_DECLARE() statement, and + * SPCR is not used. Parameter "earlycon" is false. + * + * If the user specifies "SPCR" earlycon, then we need to update + * the console name so that it also says "qdf2400_e44". Parameter + * "earlycon" is true. + * + * For consistency, if we change the console name, then we do it + * for everyone, not just earlycon. + */ + if (qdf2400_erratum_44_present(&table->header)) { + qdf2400_e44_present = true; + if (earlycon) + uart = "qdf2400_e44"; + } + if (xgene_8250_erratum_present(table)) iotype = "mmio32"; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 948fc86980a1b75d1b9f99e9588bf894e024a88c..363fc5330c211456762e049d1569a04940c580ef 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -215,7 +215,7 @@ config SATA_FSL config SATA_GEMINI tristate "Gemini SATA bridge support" - depends on PATA_FTIDE010 + depends on ARCH_GEMINI || COMPILE_TEST default ARCH_GEMINI help This enabled support for the FTIDE010 to SATA bridge @@ -613,7 +613,7 @@ config PATA_FTIDE010 tristate "Faraday Technology FTIDE010 PATA support" depends on OF depends on ARM - default ARCH_GEMINI + depends on SATA_GEMINI help This option enables support for the Faraday FTIDE010 PATA controller found in the Cortina Gemini SoCs. 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8453f9a4682f855e910f59d1d78860f639d4bf59..fa7dd4394c02b642c00119bd00a4b59b18921a0d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, retry: ata_tf_init(dev, &tf); if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && - !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) { + !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { tf.command = ATA_CMD_READ_LOG_DMA_EXT; tf.protocol = ATA_PROT_DMA; dma = true; @@ -2102,8 +2102,8 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, buf, sectors * ATA_SECT_SIZE, 0); if (err_mask && dma) { - dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG; - ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n"); + dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; + ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n"); goto retry; } diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index b70bcf6d2914b6a82967fee346f5865b5c3b66b8..3dbd05532c09cdde995935539efa917e4f96dcd9 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, /** * ata_eh_done - EH action complete -* @ap: target ATA port + * @link: ATA link for which EH actions are complete * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * @@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) /** * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT - * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to + * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to * @cmd: scsi command for which the sense code should be set * * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK @@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) struct ata_link *link; struct ata_device *dev; unsigned long flags; - int rc = 0; /* are we resuming? 
*/ spin_lock_irqsave(ap->lock, flags); @@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) ata_acpi_set_state(ap, ap->pm_mesg); if (ap->ops->port_resume) - rc = ap->ops->port_resume(ap); + ap->ops->port_resume(ap); /* tell ACPI that we're resuming */ ata_acpi_on_resume(ap); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d462c5a3a7efd8b3c4933df845c0f3f1d8637bac..44ba292f2cd793f07e279ada3b2c83c44578f0cf 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { if (!sata_pmp_attached(ap)) { - if (likely(devno < ata_link_max_devices(&ap->link))) + if (likely(devno >= 0 && + devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { - if (likely(devno < ap->nr_pmp_links)) + if (likely(devno >= 0 && + devno < ap->nr_pmp_links)) return &ap->pmp_link[devno].device[0]; } diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index ee98447587366f240a580e56a49e0983f58727e2..537d11869069aae7d26e1e253ed3f4d587d25bbf 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = { .compatible = "renesas,sata-r8a7795", .data = (void *)RCAR_GEN2_SATA }, + { + .compatible = "renesas,rcar-gen2-sata", + .data = (void *)RCAR_GEN2_SATA + }, + { + .compatible = "renesas,rcar-gen3-sata", + .data = (void *)RCAR_GEN2_SATA + }, { }, }; MODULE_DEVICE_TABLE(of, sata_rcar_match); diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 2ae24c28e70c87901185de6d558545ebe58d8d0b..1c152aed6b8265409a0ab2a677ce4bd0a99d62a9 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de { if (dev && dev->dma_mem) return dev->dma_mem; - return dma_coherent_default_memory; + return NULL; } static inline dma_addr_t dma_get_device_base(struct device *dev, @@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev, } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); -/** - * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area - * - * @dev: device from which we allocate memory - * @size: size of requested memory area - * @dma_handle: This will be filled with the correct dma handle - * @ret: This pointer will be filled with the virtual address - * to allocated area. - * - * This function should be only called from per-arch dma_alloc_coherent() - * to support allocation from per-device coherent memory pools. - * - * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return @ret. - */ -int dma_alloc_from_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); int order = get_order(size); unsigned long flags; int pageno; int dma_memory_map; + void *ret; - if (!mem) - return 0; - - *ret = NULL; spin_lock_irqsave(&mem->spinlock, flags); if (unlikely(size > (mem->size << PAGE_SHIFT))) @@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, goto err; /* - * Memory was found in the per-device area. 
+ * Memory was found in the coherent area. */ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); if (dma_memory_map) - memset(*ret, 0, size); + memset(ret, 0, size); else - memset_io(*ret, 0, size); + memset_io(ret, 0, size); - return 1; + return ret; err: spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + +/** + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool + * @dev: device from which we allocate memory + * @size: size of requested memory area + * @dma_handle: This will be filled with the correct dma handle + * @ret: This pointer will be filled with the virtual address + * to allocated area. + * + * This function should be only called from per-arch dma_alloc_coherent() + * to support allocation from per-device coherent memory pools. + * + * Returns 0 if dma_alloc_coherent should continue with allocating from + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. + */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + if (!mem) + return 0; + + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; + /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the @@ -225,25 +235,20 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_coherent); +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); -/** - * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if - * dma_release_coherent() should proceed with releasing memory from - * generic pools. 
- */ -int dma_release_from_coherent(struct device *dev, int order, void *vaddr) +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; @@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) } return 0; } -EXPORT_SYMBOL(dma_release_from_coherent); /** - * dma_mmap_from_coherent() - try to mmap the memory allocated from - * per-device coherent memory pool to userspace + * dma_release_from_dev_coherent() - free memory to device coherent memory pool * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_coherent - * @size: size of the memory buffer allocated by dma_alloc_from_coherent - * @ret: result from remap_pfn_range() + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. + * coherent memory pool and if so, releases that memory. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. */ -int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; @@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, } return 0; } -EXPORT_SYMBOL(dma_mmap_from_coherent); + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if we correctly mapped the memory, or 0 if the caller should + * proceed with mapping memory from generic pools. 
+ */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} /* * Support for reserved memory regions defined in device tree diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5096755d185e0c1a597d25189547ab797b568f36..b555ff9dd8fceb176af2f567157165726e9ab314 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9f907eedbf770ee32359468d2b8d07e57bde667..bfbe1e15412889dfb40e699af7c3623a06d83253 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -30,7 +30,6 @@ #include #include #include -#include #include @@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. */ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - __fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -1200,6 +1196,28 @@ 
_request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? */ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 87a0a29f6e7e3c5a5f84332cc72de4543e6a86dd..5bdf923294a5d610429581c83e558529f7d40b62 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -908,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) continue; } sk_set_memalloc(sock->sk); - sock->sk->sk_sndtimeo = nbd->tag_set.timeout; + if (nbd->tag_set.timeout) + sock->sk->sk_sndtimeo = nbd->tag_set.timeout; atomic_inc(&config->recv_threads); refcount_inc(&nbd->config_refs); old = nsock->sock; @@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) mutex_unlock(&nsock->tx_lock); sockfd_put(old); + clear_bit(NBD_DISCONNECTED, &config->runtime_flags); + /* We take the tx_mutex in an error path in the recv_work, so we * need to queue_work outside of the tx_mutex. 
*/ @@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd) int i, ret; for (i = 0; i < config->num_connections; i++) { + struct nbd_sock *nsock = config->socks[i]; + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + mutex_lock(&nsock->tx_lock); ret = sock_xmit(nbd, i, 1, &from, 0, NULL); if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); + mutex_unlock(&nsock->tx_lock); } } @@ -991,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd) struct nbd_config *config = nbd->config; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); - if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, - &config->runtime_flags)) - send_disconnects(nbd); + set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); + send_disconnects(nbd); return 0; } @@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd) return -ENOMEM; } sk_set_memalloc(config->socks[i]->sock->sk); - config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout; + if (nbd->tag_set.timeout) + config->socks[i]->sock->sk->sk_sndtimeo = + nbd->tag_set.timeout; atomic_inc(&config->recv_threads); refcount_inc(&nbd->config_refs); INIT_WORK(&args->work, recv_work); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 6b16ead1da5871abcef5b2233733f281158596a8..ad9749463d4fa9a382afa7f24587bbbe3a2efcc9 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -875,6 +875,56 @@ static void print_version(void) printk(KERN_INFO "%s", version); } +struct vdc_check_port_data { + int dev_no; + char *type; +}; + +static int vdc_device_probed(struct device *dev, void *arg) +{ + struct vio_dev *vdev = to_vio_dev(dev); + struct vdc_check_port_data *port_data; + + port_data = (struct vdc_check_port_data *)arg; + + if ((vdev->dev_no == port_data->dev_no) && + (!(strcmp((char *)&vdev->type, port_data->type))) && + dev_get_drvdata(dev)) { + /* This device has already been configured + * by vdc_port_probe() + */ + return 1; + } else { + return 0; + } +} + +/* Determine whether the VIO device is part of an mpgroup + * by locating all the virtual-device-port nodes associated + * with the parent virtual-device node for the VIO device + * and checking whether any of these nodes are vdc-ports + * which have already been configured. + * + * Returns true if this device is part of an mpgroup and has + * already been probed. + */ +static bool vdc_port_mpgroup_check(struct vio_dev *vdev) +{ + struct vdc_check_port_data port_data; + struct device *dev; + + port_data.dev_no = vdev->dev_no; + port_data.type = (char *)&vdev->type; + + dev = device_find_child(vdev->dev.parent, &port_data, + vdc_device_probed); + + if (dev) + return true; + + return false; +} + static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto err_out_release_mdesc; } + /* Check if this device is part of an mpgroup */ + if (vdc_port_mpgroup_check(vdev)) { + printk(KERN_WARNING + "VIO: Ignoring extra vdisk port %s", + dev_name(&vdev->dev)); + goto err_out_release_mdesc; + } + port = kzalloc(sizeof(*port), GFP_KERNEL); err = -ENOMEM; if (!port) { @@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_tx_ring; + /* Note that the device driver_data is used to determine + * whether the port has been probed. 
+ */ dev_set_drvdata(&vdev->dev, port); mdesc_release(hp); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4e02aa5fdac053dfa254fb05aac1b2c252e4c1be..1498b899a593e31951c835f4f537d1bb03d0e596 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, int i; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); - for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) - if (sysfs_streq(buf, virtblk_cache_types[i])) - break; - + i = sysfs_match_string(virtblk_cache_types, buf); if (i < 0) - return -EINVAL; + return i; virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); virtblk_update_cache_mode(vdev); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c852ed3c01d55f23c69e4c9133b2fd88137402f9..2468c28d477110e6bcb2cbf5225dd481c6ec5b95 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -111,7 +111,7 @@ struct blk_shadow { }; struct blkif_req { - int error; + blk_status_t error; }; static inline struct blkif_req *blkif_req(struct request *rq) @@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ + bool new_persistent_gnts = false; struct scatterlist *sg; int num_sg, max_grefs, num_grant; @@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri */ max_grefs += INDIRECT_GREFS(max_grefs); - /* - * We have to reserve 'max_grefs' grants because persistent - * grants are shared by all rings. - */ - if (max_grefs > 0) - if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) { + /* Check if we have enough persistent grants to allocate a requests */ + if (rinfo->persistent_gnts_c < max_grefs) { + new_persistent_gnts = true; + + if (gnttab_alloc_grant_references( + max_grefs - rinfo->persistent_gnts_c, + &setup.gref_head) < 0) { gnttab_request_free_callback( &rinfo->callback, blkif_restart_queue_callback, rinfo, - max_grefs); + max_grefs - rinfo->persistent_gnts_c); return 1; } + } /* Fill out a communications ring structure. */ id = blkif_ring_get_request(rinfo, req, &ring_req); @@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri if (unlikely(require_extra_req)) rinfo->shadow[extra_id].req = *extra_ring_req; - if (max_grefs > 0) + if (new_persistent_gnts) gnttab_free_grant_references(setup.gref_head); return 0; @@ -906,8 +909,8 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_IOERR; out_busy: - spin_unlock_irqrestore(&rinfo->ring_lock, flags); blk_mq_stop_hw_queue(hctx); + spin_unlock_irqrestore(&rinfo->ring_lock, flags); return BLK_STS_RESOURCE; } @@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - blkif_req(req)->error = -EOPNOTSUPP; + blkif_req(req)->error = BLK_STS_NOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && rinfo->shadow[id].req.u.rw.nr_segments == 0)) { @@ -2072,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev) /* * Get the bios in the request so we can re-queue them. 
*/ - if (req_op(shadow[i].request) == REQ_OP_FLUSH || - req_op(shadow[i].request) == REQ_OP_DISCARD || - req_op(shadow[i].request) == REQ_OP_SECURE_ERASE || + if (req_op(shadow[j].request) == REQ_OP_FLUSH || + req_op(shadow[j].request) == REQ_OP_DISCARD || + req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || shadow[j].request->cmd_flags & REQ_FUA) { /* * Flush operations don't contain bios, so diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 856d5dc02451d44b59695127994017877cd02b38..3b1b6340ba13a2977ffd0a13424ce95322f67f0e 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); - char compressor[CRYPTO_MAX_ALG_NAME]; + char compressor[ARRAY_SIZE(zram->compressor)]; size_t sz; strlcpy(compressor, buf, sizeof(compressor)); @@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev, return -EBUSY; } - strlcpy(zram->compressor, compressor, sizeof(compressor)); + strcpy(zram->compressor, compressor); up_write(&zram->init_lock); return len; } diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index 1e6e0269edcc18ed9f7c8e4cc65e6ce6595c8683..f76be6bd6eb3dfd52bac8c847f3a93e5c3f7777c 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c @@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) uniphier_system_bus_set_reg(priv); + platform_set_drvdata(pdev, priv); + /* Now, the bus is configured. Populate platform_devices below it */ return of_platform_default_populate(dev->of_node, NULL, dev); } +static int __maybe_unused uniphier_system_bus_resume(struct device *dev) +{ + uniphier_system_bus_set_reg(dev_get_drvdata(dev)); + + return 0; +} + +static const struct dev_pm_ops uniphier_system_bus_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume) +}; + static const struct of_device_id uniphier_system_bus_match[] = { { .compatible = "socionext,uniphier-system-bus" }, { /* sentinel */ } @@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = { .driver = { .name = "uniphier-system-bus", .of_match_table = uniphier_system_bus_match, + .pm = &uniphier_system_bus_pm_ops, }, }; module_platform_driver(uniphier_system_bus_driver); diff --git a/drivers/char/random.c b/drivers/char/random.c index afa3ce7d3e729a1ad1485d129aa1d26646292f74..8ad92707e45f23b890203d5c5468d47473acf636 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM print_once = true; #endif - pr_notice("random: %s called from %pF with crng_init=%d\n", + pr_notice("random: %s called from %pS with crng_init=%d\n", func_name, caller, crng_init); } diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c index c391a49aaaffb22fe1b02693368a447bbdc39dfd..b4cf2f699a21024b727aabcb5a265fb0d613e817 100644 --- a/drivers/clk/clk-gemini.c +++ b/drivers/clk/clk-gemini.c @@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev, BIT(GEMINI_RESET_CPU1) | BIT(id)); } +static int gemini_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return 0; +} + +static int gemini_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return 0; +} + static int 
gemini_reset_status(struct reset_controller_dev *rcdev, unsigned long id) { @@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev, static const struct reset_control_ops gemini_reset_ops = { .reset = gemini_reset, + .assert = gemini_reset_assert, + .deassert = gemini_reset_deassert, .status = gemini_reset_status, }; diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 43b0f2f08df2f7536d2f09e4f37984dde090e474..9cdf9d5050aca80858d3dc9dfcea416d91912b97 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -22,6 +22,7 @@ #include #include #include +#include #define SCI_CLK_SSC_ENABLE BIT(0) #define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1) @@ -44,6 +45,7 @@ struct sci_clk_data { * @dev: Device pointer for the clock provider * @clk_data: Clock data * @clocks: Clocks array for this device + * @num_clocks: Total number of clocks for this provider */ struct sci_clk_provider { const struct ti_sci_handle *sci; @@ -51,6 +53,7 @@ struct sci_clk_provider { struct device *dev; const struct sci_clk_data *clk_data; struct clk_hw **clocks; + int num_clocks; }; /** @@ -58,7 +61,6 @@ struct sci_clk_provider { * @hw: Hardware clock cookie for common clock framework * @dev_id: Device index * @clk_id: Clock index - * @node: Clocks list link * @provider: Master clock provider * @flags: Flags for the clock */ @@ -66,7 +68,6 @@ struct sci_clk { struct clk_hw hw; u16 dev_id; u8 clk_id; - struct list_head node; struct sci_clk_provider *provider; u8 flags; }; @@ -367,6 +368,19 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider, return &sci_clk->hw; } +static int _cmp_sci_clk(const void *a, const void *b) +{ + const struct sci_clk *ca = a; + const struct sci_clk *cb = *(struct sci_clk **)b; + + if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id) + return 0; + if (ca->dev_id > cb->dev_id || + (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id)) + return 1; + return -1; +} + /** * sci_clk_get - Xlate function for getting clock handles * @clkspec: device tree clock specifier @@ -380,29 +394,22 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider, static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data) { struct sci_clk_provider *provider = data; - u16 dev_id; - u8 clk_id; - const struct sci_clk_data *clks = provider->clk_data; - struct clk_hw **clocks = provider->clocks; + struct sci_clk **clk; + struct sci_clk key; if (clkspec->args_count != 2) return ERR_PTR(-EINVAL); - dev_id = clkspec->args[0]; - clk_id = clkspec->args[1]; + key.dev_id = clkspec->args[0]; + key.clk_id = clkspec->args[1]; - while (clks->num_clks) { - if (clks->dev == dev_id) { - if (clk_id >= clks->num_clks) - return ERR_PTR(-EINVAL); - - return clocks[clk_id]; - } + clk = bsearch(&key, provider->clocks, provider->num_clocks, + sizeof(clk), _cmp_sci_clk); - clks++; - } + if (!clk) + return ERR_PTR(-ENODEV); - return ERR_PTR(-ENODEV); + return &(*clk)->hw; } static int ti_sci_init_clocks(struct sci_clk_provider *p) @@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p) const struct sci_clk_data *data = p->clk_data; struct clk_hw *hw; int i; + int num_clks = 0; while (data->num_clks) { - p->clocks = devm_kcalloc(p->dev, data->num_clks, - sizeof(struct sci_clk), - GFP_KERNEL); - if (!p->clocks) - return -ENOMEM; + num_clks += data->num_clks; + data++; + } + p->num_clocks = num_clks; + + p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk), + GFP_KERNEL); + if 
(!p->clocks) + return -ENOMEM; + + num_clks = 0; + + data = p->clk_data; + + while (data->num_clks) { for (i = 0; i < data->num_clks; i++) { hw = _sci_clk_build(p, data->dev, i); if (!IS_ERR(hw)) { - p->clocks[i] = hw; + p->clocks[num_clks++] = hw; continue; } diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 39eab69fe51a8a76791029ae718c575f16843630..44a5a535ca6334ebe17acf74ca38fea45ab86708 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw, reg = PARM_SET(p->width, p->shift, reg, 1); writel(reg, mpll->base + p->reg_off); + p = &mpll->ssen; + if (p->width != 0) { + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, 1); + writel(reg, mpll->base + p->reg_off); + } + p = &mpll->n2; reg = readl(mpll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, n2); diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index d6feafe8bd6cea9ff9d61b0416f7c5d7858a044a..1629da9b414195c5159ee96449f5e5a22dec39c6 100644 --- a/drivers/clk/meson/clkc.h +++ b/drivers/clk/meson/clkc.h @@ -118,6 +118,7 @@ struct meson_clk_mpll { struct parm sdm_en; struct parm n2; struct parm en; + struct parm ssen; spinlock_t *lock; }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index a897ea45327c985b189e9c5e81ac40028a682d7d..a7ea5f3da89d5357c2332f8a7fe4afa609cb9997 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = { .shift = 14, .width = 1, }, + .ssen = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll0", diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index bb3f1de876b1a8cd6bd5f191c6f2ff0737751aa7..6ec512ad259805c34b57be3ee96eddef149599ee 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = { .shift = 14, .width = 1, }, + .ssen = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll0", diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 0748a0b333c54c4a4a4b888ddd9e83a25d4323f7..9a6476aa7d818b3ad3ec84c9e68abccf21f04daf 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = { PLL_36XX_RATE(600000000U, 100, 2, 1, 0), PLL_36XX_RATE(400000000U, 200, 3, 2, 0), - PLL_36XX_RATE(393216000U, 197, 3, 2, 25690), - PLL_36XX_RATE(361267200U, 301, 5, 2, 3671), + PLL_36XX_RATE(393216003U, 197, 3, 2, -25690), + PLL_36XX_RATE(361267218U, 301, 5, 2, 3671), PLL_36XX_RATE(200000000U, 200, 3, 3, 0), - PLL_36XX_RATE(196608000U, 197, 3, 3, -25690), - PLL_36XX_RATE(180633600U, 301, 5, 3, 3671), - PLL_36XX_RATE(131072000U, 131, 3, 3, 4719), + PLL_36XX_RATE(196608001U, 197, 3, 3, -25690), + PLL_36XX_RATE(180633609U, 301, 5, 3, 3671), + PLL_36XX_RATE(131072006U, 131, 3, 3, 4719), PLL_36XX_RATE(100000000U, 200, 3, 4, 0), - PLL_36XX_RATE(65536000U, 131, 3, 4, 4719), - PLL_36XX_RATE(49152000U, 197, 3, 5, 25690), - PLL_36XX_RATE(32768000U, 131, 3, 5, 4719), + PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719), + PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690), + PLL_36XX_RATE( 
32768001U, 131, 3, 5, 4719), }; static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = { diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5372bf8be5e6fb8b0a43185279d33cc1cc30bd3b..31d7ffda9aab0c826627c14e3f51808d4639a6b4 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = { .hw.init = CLK_HW_INIT_PARENTS("cpu", cpu_parents, &ccu_mux_ops, - CLK_IS_CRITICAL), + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), } }; diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index f99abc1106f0cb725ff7c63e9bc84b75596a4e75..08ef69945ffbf425efa001d48c6c9b5b0f4333eb 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); + /* + * If the clock was already enabled by the firmware mark it as critical + * to avoid it being gated by the clock framework if no driver owns it. + */ + if (plt_clk_is_enabled(&pclk->hw)) + init.flags |= CLK_IS_CRITICAL; + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index fcae5ca6ac9234debecf5a4f84521405aa64a74c..54a67f8a28ebfb929610ea07697d7c7e4eb33de1 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -262,7 +262,7 @@ config CLKSRC_LPC32XX config CLKSRC_PISTACHIO bool "Clocksource for Pistachio SoC" if COMPILE_TEST - depends on HAS_IOMEM + depends on GENERIC_CLOCKEVENTS && HAS_IOMEM select TIMER_OF help Enables the clocksource for the Pistachio SoC. diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aae87c4c546ee06203195654087cd9ef8b43d57a..72bbfccef1132c3a962d9e93d4bd6c459f1b368d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1440,7 +1440,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) * While unlikely, it's theoretically possible that none of the frames * in a timer expose the combination of feature we want. 
*/ - for (i = i; i < timer_count; i++) { + for (i = 0; i < timer_count; i++) { timer = &timers[i]; frame = arch_timer_mem_find_best_frame(timer); diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index bc48cbf6a7957198d4ecd6bf91840df350843d48..269db74a065815e2211194fbda9774dcf1e80041 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get irq\n"); - return -EINVAL; + return irq; } /* map memory, let base point to the STI instance */ @@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev) if (IS_ERR(p->base)) return PTR_ERR(p->base); - if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, - IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, - dev_name(&pdev->dev), p)) { + ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&pdev->dev), p); + if (ret) { dev_err(&pdev->dev, "failed to request low IRQ\n"); - return -ENOENT; + return ret; } /* get hold of clock */ diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index d509b500a7b5f8565514352a0313cb4704976042..4d7aef9d9c15422df0e9973e72379b2e21256f47 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -128,9 +128,9 @@ static __init int timer_base_init(struct device_node *np, const char *name = of_base->name ? of_base->name : np->full_name; of_base->base = of_io_request_and_map(np, of_base->index, name); - if (!of_base->base) { + if (IS_ERR(of_base->base)) { pr_err("Failed to iomap (%s)\n", name); - return -ENXIO; + return PTR_ERR(of_base->base); } return 0; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6cd50352563894bea24866d2d36d6b815d0abc20..65ee4fcace1f260632cf53c150c3260cc27240ad 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1613,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) static inline int32_t get_avg_frequency(struct cpudata *cpu) { - return mul_ext_fp(cpu->sample.core_avg_perf, - cpu->pstate.max_pstate_physical * cpu->pstate.scaling); + return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); } static inline int32_t get_avg_pstate(struct cpudata *cpu) @@ -1922,13 +1921,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) return 0; } -static unsigned int intel_pstate_get(unsigned int cpu_num) -{ - struct cpudata *cpu = all_cpu_data[cpu_num]; - - return cpu ? 
get_avg_frequency(cpu) : 0; -} - static void intel_pstate_set_update_util_hook(unsigned int cpu_num) { struct cpudata *cpu = all_cpu_data[cpu_num]; @@ -2169,7 +2161,6 @@ static struct cpufreq_driver intel_pstate = { .setpolicy = intel_pstate_set_policy, .suspend = intel_pstate_hwp_save_state, .resume = intel_pstate_resume, - .get = intel_pstate_get, .init = intel_pstate_cpu_init, .exit = intel_pstate_cpu_exit, .stop_cpu = intel_pstate_stop_cpu, diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 37b0698b7193e60be4107a8be107285e55c65877..42896a67aeae38325cbda2acb7ca655c1b43915c 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len, return -1; } +extern u32 pnv_get_supported_cpuidle_states(void); static int powernv_add_idle_states(void) { struct device_node *power_mgt; @@ -248,6 +249,8 @@ static int powernv_add_idle_states(void) const char *names[CPUIDLE_STATE_MAX]; u32 has_stop_states = 0; int i, rc; + u32 supported_flags = pnv_get_supported_cpuidle_states(); + /* Currently we have snooze statically defined */ @@ -362,6 +365,13 @@ static int powernv_add_idle_states(void) for (i = 0; i < dt_idle_states; i++) { unsigned int exit_latency, target_residency; bool stops_timebase = false; + + /* + * Skip the platform idle state whose flag isn't in + * the supported_cpuidle_states flag mask. + */ + if ((flags[i] & supported_flags) != flags[i]) + continue; /* * If an idle state has exit latency beyond * POWERNV_THRESHOLD_LATENCY_NS then don't use it diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 193204dfbf3a084f7540621fd377b98f2cb7e01a..4b75084fabad23e29b81048d8d93f38888aea0ec 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig" config CRYPTO_DEV_BCM_SPU tristate "Broadcom symmetric crypto/hash acceleration support" depends on ARCH_BCM_IPROC - depends on BCM_PDC_MBOX + depends on MAILBOX default m select CRYPTO_DES select CRYPTO_MD5 diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index ef04c974831732b0c27977be838bbc10e2a9fd41..bf7ac621c591dcad6737c42edb06d9392c27c8f9 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode, break; case HASH_ALG_SHA3_512: *spu2_type = SPU2_HASH_TYPE_SHA3_512; + break; case HASH_ALG_LAST: default: err = -EINVAL; diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index ae44a464cd2d20509e435c4b0b7a34e394abbc14..9ccefb9b7232ef68501c04db31aa7ac3ee983525 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -18,8 +18,9 @@ #define SE_GROUP 0 #define DRIVER_VERSION "1.0" +#define FW_DIR "cavium/" /* SE microcode */ -#define SE_FW "cnn55xx_se.fw" +#define SE_FW FW_DIR "cnn55xx_se.fw" static const char nitrox_driver_name[] = "CNN55XX"; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index e7f87ac126858519a0477c1254dc9e02424146a9..1fabd4aee81b75cd9e52785e6d09bb82dad58812 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct safexcel_crypto_priv *priv; - u64 dma_mask; int i, ret; priv 
= devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask)) - dma_mask = DMA_BIT_MASK(64); - ret = dma_set_mask_and_coherent(dev, dma_mask); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) goto err_clk; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 8527a5899a2f7b6a3245a4a52ca4c0283b2f4666..3f819399cd95519a9956ed1d3ecba76fa2aa62b4 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, if (ret) return ret; - memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); - memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); - - for (i = 0; i < ARRAY_SIZE(istate.state); i++) { + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || ctx->opad[i] != le32_to_cpu(ostate.state[i])) { ctx->base.needs_inv = true; @@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, } } + memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); + memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); + return 0; } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 427cbe01272926acd38eba211dceb28fec61db64..dadc4a808df5a37764adf45df9f90dc1fce5b03a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt, req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &crypt->icv_rev_aes); if (unlikely(!req_ctx->hmac_virt)) - goto free_buf_src; + goto free_buf_dst; if (!encrypt) { scatterwalk_map_and_copy(req_ctx->hmac_virt, req->src, cryptlen, authsize, 0); @@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt, BUG_ON(qmgr_stat_overflow(SEND_QID)); return -EINPROGRESS; -free_buf_src: - free_buf_chain(dev, req_ctx->src, crypt->src_buf); free_buf_dst: free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); +free_buf_src: + free_buf_chain(dev, req_ctx->src, crypt->src_buf); crypt->ctl_flags = CTL_FLAG_UNUSED; return -ENOMEM; } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index ce9e563e6e1d47715a776405c3b86b283900a283..b699aac268a6c3c172e3507dff9d17005f79ed2f 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -46,6 +46,8 @@ void dax_read_unlock(int id) EXPORT_SYMBOL_GPL(dax_read_unlock); #ifdef CONFIG_BLOCK +#include + int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, pgoff_t *pgoff) { @@ -59,6 +61,14 @@ int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, } EXPORT_SYMBOL(bdev_dax_pgoff); +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) +{ + if (!blk_queue_dax(bdev->bd_queue)) + return NULL; + return fs_dax_get_by_host(bdev->bd_disk->disk_name); +} +EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); + /** * __bdev_dax_supported() - Check if the device supports dax for filesystem * @sb: The superblock of the device @@ -278,6 +288,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc) } EXPORT_SYMBOL_GPL(dax_write_cache); +bool dax_write_cache_enabled(struct dax_device *dax_dev) +{ + return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); +} +EXPORT_SYMBOL_GPL(dax_write_cache_enabled); + bool dax_alive(struct dax_device *dax_dev) 
{ lockdep_assert_held(&dax_srcu); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index d7e219d2669daf01c935bd18c0836cb88a34e075..66fb40d0ebdbbec521499cd58cf2e1d55c195878 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file) { struct sync_file *sync_file = file->private_data; - if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) + if (test_bit(POLL_ENABLED, &sync_file->flags)) dma_fence_remove_callback(sync_file->fence, &sync_file->cb); dma_fence_put(sync_file->fence); kfree(sync_file); @@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) poll_wait(file, &sync_file->wq, wait); - if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { + if (list_empty(&sync_file->cb.node) && + !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) { if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index f235eae04c16ed1096689329c0462df74e05a36d..461d6fc3688b61128f280a84c8ed3e1073488ab0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -504,6 +504,7 @@ config GPIO_XGENE_SB depends on ARCH_XGENE && OF_GPIO select GPIO_GENERIC select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY help This driver supports the GPIO block within the APM X-Gene Standby Domain. Say yes here to enable the GPIO functionality. diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index fb8d304cfa171fcb3a5372f413a54783a7e52b96..0ecd2369c2cad0daa5e08696ab85b91af5235a26 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev) if (!p) return -ENOMEM; - ret = device_property_read_u32(&pdev->dev, "linux,first-pin", + ret = device_property_read_u32(&pdev->dev, "exar,first-pin", &first_pin); if (ret) return ret; diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c index 6313c50bb91be057a9de80a2d22277270518e648..a121c8f1061005380464d4cf96ac68a4905b293c 100644 --- a/drivers/gpio/gpio-lp87565.c +++ b/drivers/gpio/gpio-lp87565.c @@ -26,6 +26,27 @@ struct lp87565_gpio { struct regmap *map; }; +static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct lp87565_gpio *gpio = gpiochip_get_data(chip); + int ret, val; + + ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); + if (ret < 0) + return ret; + + return !!(val & BIT(offset)); +} + +static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct lp87565_gpio *gpio = gpiochip_get_data(chip); + + regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, + BIT(offset), value ? BIT(offset) : 0); +} + static int lp87565_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { @@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip, { struct lp87565_gpio *gpio = gpiochip_get_data(chip); + lp87565_gpio_set(chip, offset, value); + return regmap_update_bits(gpio->map, LP87565_REG_GPIO_CONFIG, - BIT(offset), !value ? 
BIT(offset) : 0); -} - -static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - struct lp87565_gpio *gpio = gpiochip_get_data(chip); - int ret, val; - - ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); - if (ret < 0) - return ret; - - return !!(val & BIT(offset)); -} - -static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) -{ - struct lp87565_gpio *gpio = gpiochip_get_data(chip); - - regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, - BIT(offset), value ? BIT(offset) : 0); + BIT(offset), BIT(offset)); } static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset) diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 3abea3f0b307e143848686dd935ee0745a414eec..92692251ade1a13b7c2df99f97598584c3c05081 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -424,6 +424,9 @@ static int mxc_gpio_probe(struct platform_device *pdev) return PTR_ERR(port->base); port->irq_high = platform_get_irq(pdev, 1); + if (port->irq_high < 0) + port->irq_high = 0; + port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) return port->irq; diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 88529d3c06c9af4d6e4a69ea71557939ba0ff48e..506c6a67c5fcb951154faad9c4b73cb7b8c45ac3 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -360,7 +360,7 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) { int port; int pin; - int unmasked = 0; + bool unmasked = false; int gpio; u32 lvl; unsigned long sta; @@ -384,8 +384,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) * before executing the handler so that we don't * miss edges */ - if (lvl & (0x100 << pin)) { - unmasked = 1; + if (!unmasked && lvl & (0x100 << pin)) { + unmasked = true; chained_irq_exit(chip, desc); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9568708a550b55b79824784270f98b3260a6f70f..cd003b74512f692e2ec67eca68bd1f4d80db0e39 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) { struct lineevent_state *le = p; struct gpioevent_data ge; - int ret; + int ret, level; ge.timestamp = ktime_get_real_ns(); + level = gpiod_get_value_cansleep(le->desc); if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { - int level = gpiod_get_value_cansleep(le->desc); - if (level) /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; else /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) { /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) { /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f621ee115c98d4e5d5a4faa0d845864dc65a0b8c..5e771bc11b0054aa667bb75291da4cb9911521df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) result = idr_find(&fpriv->bo_list_handles, id); if (result) { - if (kref_get_unless_zero(&result->refcount)) + if (kref_get_unless_zero(&result->refcount)) { + rcu_read_unlock(); 
mutex_lock(&result->lock); - else + } else { + rcu_read_unlock(); result = NULL; + } + } else { + rcu_read_unlock(); } - rcu_read_unlock(); return result; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 38f739fb727bd2da4c81ad198ae18e46bee8e040..6558a3ed57a7f6a1127f81aa0e1b5a861aa21b8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) head = bo->mn_list.next; bo->mn = NULL; - list_del(&bo->mn_list); + list_del_init(&bo->mn_list); if (list_empty(head)) { struct amdgpu_mn_node *node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6899180b265721831837282e6c66d12a8bccf48..c586f44312f9772fb93f8b1442827c842d610594 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *f = e->fence; struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + if (dma_fence_is_signaled(f)) { + hash_del(&e->node); + dma_fence_put(f); + kmem_cache_free(amdgpu_sync_slab, e); + continue; + } if (ring && s_fence) { /* For fences from the same ring it is sufficient * when they are scheduled. @@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, } } - if (dma_fence_is_signaled(f)) { - hash_del(&e->node); - dma_fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - return f; } diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 18fd01f3e4b245d7d9f4cf24a97daa20beeaee66..003a131bad474db5d28584735c0e399b1def20d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h @@ -1,24 +1,25 @@ - /* -*************************************************************************************************** -* -* Trade secret of Advanced Micro Devices, Inc. -* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished) -* -* All rights reserved. This notice is intended as a precaution against inadvertent publication and -* does not imply publication or any waiver of confidentiality. The year included in the foregoing -* notice is the year of creation of the work. -* -*************************************************************************************************** -*/ -/** -*************************************************************************************************** -* @brief gfx9 Clearstate Definitions -*************************************************************************************************** -* -* Do not edit! This is a machine-generated file! -* -*/ + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ static const unsigned int gfx9_SECT_CONTEXT_def_1[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3a0b69b09ed62ed9dd18a8b4dbfad595b7d30030..c9b9c88231aa5070b709e97ce558a78fcfb43097 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); + + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f45fb0f022b3c6480b08afa545f4e3f4f9381610..4267fa417997aeaf6336297f25a05c31c7ae4eb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, pitcairn_mgcg_cgcg_init, (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + break; case CHIP_VERDE: amdgpu_program_register_sequence(adev, verde_golden_registers, @@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); + break; case CHIP_HAINAN: amdgpu_program_register_sequence(adev, hainan_golden_registers, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d6f097f44b6cfe8587b64c16cfa6af4663c729e7..197174e562d208e773492f9da2280fd03414c540 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) pp_table->AvfsGbCksOff.m2_shift = 12; pp_table->AvfsGbCksOff.b_shift = 0; - for (i = 0; i < dep_table->count; i++) { - if 
(dep_table->entries[i].sclk_offset == 0) - pp_table->StaticVoltageOffsetVid[i] = 248; - else - pp_table->StaticVoltageOffsetVid[i] = - (uint8_t)(dep_table->entries[i].sclk_offset * - VOLTAGE_VID_OFFSET_SCALE2 / - VOLTAGE_VID_OFFSET_SCALE1); - } + for (i = 0; i < dep_table->count; i++) + pp_table->StaticVoltageOffsetVid[i] = + convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != data->disp_clk_quad_eqn_a) && diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 5c26488e7a2d7a0320ddf321375b8ff4c200185f..0529e500c5341ed5e0d93bb658cd42d9288b13ad 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); - if (ret) + if (ret && ret != -ENODEV) return ret; /* Shut down GPIO is optional */ diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 213fb837e1c40fe79bf536d54b083d99dee1c192..08af8d6b844b67e0653f93c5dc012706d9a13b19 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m, DP_DETAILED_CAP_INFO_AVAILABLE; int clk; int bpc; - char id[6]; + char id[7]; int len; uint8_t rev[2]; int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; @@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_puts(m, "\t\tType: N/A\n"); } + memset(id, 0, sizeof(id)); drm_dp_downstream_id(aux, id); seq_printf(m, "\t\tID: %s\n", id); @@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_printf(m, "\t\tHW: %d.%d\n", (rev[0] & 0xf0) >> 4, rev[0] & 0xf); - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); + len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); if (len > 0) seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5bd93169dac2059a0981cc2f24b8c9032447ba9e..6463fc2c736fd4db5881a259b21848328b7f6cea 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, if (ret) return ret; - if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { - DRM_ERROR("relocation %u outside object", i); + if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u outside object\n", i); return -EINVAL; } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1d185347c64c06f7b749061909390ea0953ed3f8..305dc3d4ff772a7fe8a1ad80b9a2897f04a4cca1 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -75,6 +75,7 @@ config DRM_EXYNOS_DP config DRM_EXYNOS_HDMI bool "HDMI" depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON + select CEC_CORE if CEC_NOTIFIER help Choose this option if you want to use Exynos HDMI for DRM. 
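Aside on the drm_dp_helper.c hunk above: it grows the branch-device ID buffer from 6 to 7 bytes and zero-fills it before drm_dp_downstream_id() writes into it, because the ID read from DPCD is exactly 6 bytes with no terminating NUL, so printing it with "%s" needs one spare byte that is known to be zero. A minimal user-space sketch of the same pattern follows; read_branch_id() is a hypothetical stand-in for the DPCD read, not a kernel API.

/* Sketch only: illustrates why the 6-byte ID needs a 7-byte, zeroed buffer. */
#include <stdio.h>
#include <string.h>

static void read_branch_id(char *id)
{
	memcpy(id, "ABC123", 6);	/* fills 6 ID bytes, adds no NUL terminator */
}

int main(void)
{
	char id[7];			/* 6 ID bytes + 1 byte reserved for the NUL */

	memset(id, 0, sizeof(id));	/* guarantees "%s" sees a terminated string */
	read_branch_id(id);
	printf("ID: %s\n", id);
	return 0;
}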
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 35a8dfc93836d83f0223d3e030dea597d57abb55..242bd50faa26018bee34e3ff54faf7b3eaa5dbac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) struct component_match *match; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); match = exynos_drm_match_add(&pdev->dev); if (IS_ERR(match)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index a11b79596e2f8d483c113b78d1df7568d59a48d4..b6a46d9a016e57982f69a6e6a02444996cb19cb5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) return ret; dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); - if (!dsi->bridge_node) - return -EINVAL; return 0; } @@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, return ret; } - bridge = of_drm_find_bridge(dsi->bridge_node); - if (bridge) - drm_bridge_attach(encoder, bridge, NULL); + if (dsi->bridge_node) { + bridge = of_drm_find_bridge(dsi->bridge_node); + if (bridge) + drm_bridge_attach(encoder, bridge, NULL); + } return mipi_dsi_host_register(&dsi->dsi_host); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index d48fd7c918f880df0b3a27da5e8fa4f09c011b04..73217c281c9a87e51ac2a3d8ddf235b227ba2a1d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -145,13 +145,19 @@ static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { + const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; struct drm_gem_object *obj; struct drm_framebuffer *fb; int i; int ret; - for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + for (i = 0; i < info->num_planes; i++) { + unsigned int height = (i == 0) ? 
mode_cmd->height : + DIV_ROUND_UP(mode_cmd->height, info->vsub); + unsigned long size = height * mode_cmd->pitches[i] + + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); @@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, } exynos_gem[i] = to_exynos_gem(obj); + + if (size > exynos_gem[i]->size) { + i++; + ret = -EINVAL; + goto err; + } } fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index e45720543a453d22c29150807815666ca0afffd9..16bbee897e0db9e7b3025f039a047e6580a59e3b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); - int ret; - mic->bridge.funcs = &mic_bridge_funcs; - mic->bridge.of_node = dev->of_node; mic->bridge.driver_private = mic; - ret = drm_bridge_add(&mic->bridge); - if (ret) - DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); - return ret; + return 0; } static void exynos_mic_unbind(struct device *dev, struct device *master, @@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master, already_disabled: mutex_unlock(&mic_mutex); - - drm_bridge_remove(&mic->bridge); } static const struct component_ops exynos_mic_component_ops = { @@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mic); + mic->bridge.funcs = &mic_bridge_funcs; + mic->bridge.of_node = dev->of_node; + + ret = drm_bridge_add(&mic->bridge); + if (ret) { + DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); + return ret; + } + pm_runtime_enable(dev); ret = component_add(dev, &exynos_mic_component_ops); @@ -479,8 +480,13 @@ static int exynos_mic_probe(struct platform_device *pdev) static int exynos_mic_remove(struct platform_device *pdev) { + struct exynos_mic *mic = platform_get_drvdata(pdev); + component_del(&pdev->dev, &exynos_mic_component_ops); pm_runtime_disable(&pdev->dev); + + drm_bridge_remove(&mic->bridge); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 06bfbe400cf1d2ea23ec361a0fa1364a139ac43d..d3b69d66736fc9f71fbbb4c7aa0e86646035a7e4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder) */ cancel_delayed_work(&hdata->hotplug_work); cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); - - hdmiphy_disable(hdata); } static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { @@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata) return hdmi_bridge_init(hdata); } -static struct of_device_id hdmi_match_types[] = { +static const struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos4210-hdmi", .data = &exynos4210_hdmi_driver_data, @@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_hdmi_suspend(struct device *dev) +static int __maybe_unused exynos_hdmi_suspend(struct device *dev) { struct hdmi_context *hdata = dev_get_drvdata(dev); @@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev) return 0; } -static int 
exynos_hdmi_resume(struct device *dev) +static int __maybe_unused exynos_hdmi_resume(struct device *dev) { struct hdmi_context *hdata = dev_get_drvdata(dev); int ret; @@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops exynos_hdmi_pm_ops = { SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6bed4f3ffcd6bd8592c51ee398a3ccdbe10355e1..a998a8dd783cbc347792f88d56141ca34e4716e1 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .atomic_check = mixer_atomic_check, }; -static struct mixer_drv_data exynos5420_mxr_drv_data = { +static const struct mixer_drv_data exynos5420_mxr_drv_data = { .version = MXR_VER_128_0_0_184, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos5250_mxr_drv_data = { +static const struct mixer_drv_data exynos5250_mxr_drv_data = { .version = MXR_VER_16_0_33_0, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos4212_mxr_drv_data = { +static const struct mixer_drv_data exynos4212_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, }; -static struct mixer_drv_data exynos4210_mxr_drv_data = { +static const struct mixer_drv_data exynos4210_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, .has_sclk = 1, }; -static struct of_device_id mixer_match_types[] = { +static const struct of_device_id mixer_match_types[] = { { .compatible = "samsung,exynos4210-mixer", .data = &exynos4210_mxr_drv_data, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 2deb05f618fb11054064122a4b86f468d148c73b..7cb0818a13debecc1d3f08e4fb10042a3536b1e9 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) { struct intel_gvt_irq *irq = &gvt->irq; struct intel_vgpu *vgpu; - bool have_enabled_pipe = false; int pipe, id; if (WARN_ON(!mutex_is_locked(&gvt->lock))) return; - hrtimer_cancel(&irq->vblank_timer.timer); - for_each_active_vgpu(gvt, vgpu, id) { for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { - have_enabled_pipe = - pipe_is_enabled(vgpu, pipe); - if (have_enabled_pipe) - break; + if (pipe_is_enabled(vgpu, pipe)) + goto out; } } - if (have_enabled_pipe) - hrtimer_start(&irq->vblank_timer.timer, - ktime_add_ns(ktime_get(), irq->vblank_timer.period), - HRTIMER_MODE_ABS); + /* all the pipes are disabled */ + hrtimer_cancel(&irq->vblank_timer.timer); + return; + +out: + hrtimer_start(&irq->vblank_timer.timer, + ktime_add_ns(ktime_get(), irq->vblank_timer.period), + HRTIMER_MODE_ABS); + } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 700050556242480e6fbf8eb4a8d97c6307e9390d..1648887d3f55248cf055524a2f0e341062f0cd8d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -46,6 +46,8 @@ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) +static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); + static int context_switch_events[] = { [RCS] = RCS_AS_CONTEXT_SWITCH, [BCS] = BCS_AS_CONTEXT_SWITCH, @@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 
static int complete_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - struct intel_vgpu_execlist *execlist = - &vgpu->execlist[workload->ring_id]; + int ring_id = workload->ring_id; + struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; struct intel_vgpu_workload *next_workload; - struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; + struct list_head *next = workload_q_head(vgpu, ring_id)->next; bool lite_restore = false; int ret; @@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) release_shadow_batch_buffer(workload); release_shadow_wa_ctx(&workload->wa_ctx); - if (workload->status || vgpu->resetting) + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + /* If workload->status is not successful, it means the HW GPU + * has hung or something is wrong with i915/GVT, and GVT won't + * inject a context switch interrupt to the guest. To the + * guest this error is therefore a vGPU hang, so we should + * emulate one. If there are pending workloads which were + * already submitted from the guest, we should clean them up + * the way the HW GPU would. + * + * If we are in the middle of an engine reset, the pending + * workloads won't be submitted to the HW GPU and will be + * cleaned up later during the reset process, so doing the + * workload cleanup here has no impact. + */ + clean_workloads(vgpu, ENGINE_MASK(ring_id)); goto out; + } - if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { + if (!list_empty(workload_q_head(vgpu, ring_id))) { struct execlist_ctx_descriptor_format *this_desc, *next_desc; next_workload = container_of(next, diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 5dad9298b2d5dbbe7b626895806e6008047bbd6a..a26c1705430eb2134d002b68ddcb26d272684bd9 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct intel_gvt_mmio_info *e; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + int num = gvt->mmio.num_mmio_block; struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i; + int i, j; int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; @@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) hash_for_each(gvt->mmio.mmio_info_table, i, e, node) *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); + for (i = 0; i < num; i++, block++) { + for (j = 0; j < block->size; j += 4) + *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) = + I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET( + block->offset) + j)); + } + memcpy(gvt->firmware.mmio, p, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 3a74e79eac2f6c13fef32e1611b539db7b8f46c3..2964a4d01a66da5d2fb06d256ed35fa83fa96a38 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -149,7 +149,7 @@ struct intel_vgpu { bool active; bool pv_notified; bool failsafe; - bool resetting; + unsigned int resetting_eng; void *sched_data; struct vgpu_sched_ctl sched_ctl; @@ -195,6 +195,15 @@ struct intel_gvt_fence { unsigned long vgpu_allocated_fence_num; }; +/* Special MMIO blocks.
*/ +struct gvt_mmio_block { + unsigned int device; + i915_reg_t offset; + unsigned int size; + gvt_mmio_func read; + gvt_mmio_func write; +}; + #define INTEL_GVT_MMIO_HASH_BITS 11 struct intel_gvt_mmio { @@ -214,6 +223,9 @@ struct intel_gvt_mmio { /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) + struct gvt_mmio_block *mmio_block; + unsigned int num_mmio_block; + DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); unsigned int num_tracked_mmio; }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 17febe830ff6984e06bb81cb91601a76b67d5f2a..feed9921b3b3eb05e6e8dce5e1b510f4d6fc9479 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) return 0; } -/* Special MMIO blocks. */ -static struct gvt_mmio_block { - unsigned int device; - i915_reg_t offset; - unsigned int size; - gvt_mmio_func read; - gvt_mmio_func write; -} gvt_mmio_blocks[] = { - {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, - {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, - pvinfo_mmio_read, pvinfo_mmio_write}, - {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, -}; - static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, unsigned int offset) { unsigned long device = intel_gvt_get_device_type(gvt); - struct gvt_mmio_block *block = gvt_mmio_blocks; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + int num = gvt->mmio.num_mmio_block; int i; - for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { + for (i = 0; i < num; i++, block++) { if (!(device & block->device)) continue; if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && @@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) gvt->mmio.mmio_attribute = NULL; } +/* Special MMIO blocks. 
*/ +static struct gvt_mmio_block mmio_blocks[] = { + {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, + {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, + {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, + pvinfo_mmio_read, pvinfo_mmio_write}, + {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, + {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, + {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, +}; + /** * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device * @gvt: GVT device @@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; } + gvt->mmio.mmio_block = mmio_blocks; + gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); + gvt_dbg_mmio("traced %u virtual mmio registers\n", gvt->mmio.num_tracked_mmio); return 0; @@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, gvt_mmio_func func; int ret; - if (WARN_ON(bytes > 4)) + if (WARN_ON(bytes > 8)) return -EINVAL; /* diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4f7057d62d88b393ce77670f9100bc2d3b246014..22e08eb2d0b7c66faf01741656fb33d1535925f3 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) i915_gem_request_put(fetch_and_zero(&workload->req)); - if (!workload->status && !vgpu->resetting) { + if (!workload->status && !(vgpu->resetting_eng & + ENGINE_MASK(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 90c14e6e3ea06b8de36d90284132659eb80f72c6..3deadcbd5a245c039169f1a10c6c91cc791d3a66 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", vgpu->id, dmlr, engine_mask); - vgpu->resetting = true; + + vgpu->resetting_eng = resetting_eng; intel_vgpu_stop_schedule(vgpu); /* @@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, mutex_lock(&gvt->lock); } - intel_vgpu_reset_execlist(vgpu, dmlr ? 
ALL_ENGINES : engine_mask); + intel_vgpu_reset_execlist(vgpu, resetting_eng); /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { @@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, } } - vgpu->resetting = false; + vgpu->resetting_eng = 0; gvt_dbg_core("reset vgpu%d done\n", vgpu->id); gvt_dbg_core("------------------------------------------\n"); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 00d8967c8512048f5fb46f629c9c76ace1161bbf..d1bd53b73738446f5d01cc7aae2b585f675de6b7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); - if (IS_GEN9_BC(dev_priv)) + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c index 152f16c11878ef937e48b9139c9b8ab844322df3..348b29a845c961c73b1605b23212692b438f0f19 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/i915_gem_clflush.c @@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence, return NOTIFY_DONE; } -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { struct clflush *clflush; @@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, */ if (!i915_gem_object_has_struct_page(obj)) { obj->cache_dirty = false; - return; + return false; } /* If the GPU is snooping the contents of the CPU cache, @@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, * tracking. 
*/ if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) - return; + return false; trace_i915_gem_object_clflush(obj); @@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, } obj->cache_dirty = false; + return true; } diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h index 2455a7820937b0f461839de713693a9010950997..f390247561b37691d615fe0182d47cf40bcc4277 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.h +++ b/drivers/gpu/drm/i915/i915_gem_clflush.h @@ -28,7 +28,7 @@ struct drm_i915_private; struct drm_i915_gem_object; -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags); #define I915_CLFLUSH_FORCE BIT(0) #define I915_CLFLUSH_SYNC BIT(1) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 39ed58a21fc1f517f56d179d48b3b9bef00e9410..e1e971ee2ed57aae59dd505a49e8a6ee546e58ff 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, } static bool -needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, - struct intel_engine_cs *engine, - struct i915_gem_context *to) +needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine) { + struct i915_gem_context *from = engine->legacy_active_context; + if (!ppgtt) return false; /* Always load the ppgtt on first use */ - if (!engine->legacy_active_context) + if (!from) return true; /* Same context without new entries, skip */ - if (engine->legacy_active_context == to && + if ((!from->ppgtt || from->ppgtt == ppgtt) && !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; @@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (skip_rcs_switch(ppgtt, engine, to)) return 0; - if (needs_pd_load_pre(ppgtt, engine, to)) { + if (needs_pd_load_pre(ppgtt, engine)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting @@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; - if (needs_pd_load_pre(ppgtt, engine, to)) { + if (needs_pd_load_pre(ppgtt, engine)) { int ret; trace_switch_mm(engine, to); @@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } + engine->legacy_active_context = to; return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 054b2e54cdafd765e82f9a86276c863744f3cd14..e9503f6d110016b86311003349ff5b8ef70613d5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, eb->args->flags |= __EXEC_HAS_RELOC; } - entry->flags |= __EXEC_OBJECT_HAS_PIN; - GEM_BUG_ON(eb_vma_misplaced(entry, vma)); - if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_get_fence(vma); if (unlikely(err)) { @@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, entry->flags |= __EXEC_OBJECT_HAS_FENCE; } + entry->flags |= __EXEC_OBJECT_HAS_PIN; + GEM_BUG_ON(eb_vma_misplaced(entry, vma)); + return 0; } @@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct 
i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. */ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) + if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { @@ -1775,7 +1775,7 @@ static noinline int eb_relocate_slow(struct i915_execbuffer *eb) } } - return err ?: have_copy; + return err; } static int eb_relocate(struct i915_execbuffer *eb) @@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) int err; for (i = 0; i < count; i++) { - const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct i915_vma *vma = exec_to_vma(entry); struct drm_i915_gem_object *obj = vma->obj; @@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) eb->request->capture_list = capture; } + if (unlikely(obj->cache_dirty && !obj->cache_coherent)) { + if (i915_gem_clflush_object(obj, 0)) + entry->flags &= ~EXEC_OBJECT_ASYNC; + } + if (entry->flags & EXEC_OBJECT_ASYNC) goto skip_flushes; - if (unlikely(obj->cache_dirty && !obj->cache_coherent)) - i915_gem_clflush_object(obj, 0); - err = i915_gem_request_await_object (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); if (err) @@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_unlock; err = eb_relocate(&eb); - if (err) + if (err) { /* * If the user expects the execobject.offset and * reloc.presumed_offset to be an exact match, @@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, * relocation. */ args->flags &= ~__EXEC_HAS_RELOC; - if (err < 0) goto err_vma; + } if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 7032c542a9b1d007c4bc69e328db040b88efc15d..4dd4c2159a92e26d91fd98e560c587a91b964e48 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) goto err_unpin; } + ret = req->engine->emit_flush(req, EMIT_INVALIDATE); + if (ret) + goto err_unpin; + ret = req->engine->emit_bb_start(req, so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 1032f98add112a66a19fb186a2b28de773caadf8..77fb3980813143d2d9e3432c0ebb994a4bcad032 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock) return true; case MUTEX_TRYLOCK_FAILED: + *unlock = false; + preempt_disable(); do { cpu_relax(); if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - case MUTEX_TRYLOCK_SUCCESS: *unlock = true; - return true; + break; } } while (!need_resched()); + preempt_enable(); + return *unlock; - return false; + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; } BUG(); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 9cd22f83b0cfaee680ed06c5bde67db6fc89d0fa..f33d90226704108e71ee5662e01977e32b627fcb 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req) u32 *cs; int i; - cs = 
intel_ring_begin(req, n_flex_regs * 2 + 4); + cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); + *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4a673fc1a4320f957595dffe5ff8c7c53fa20adf..20cf272c97b1f8e16328a0304d25539797f07631 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma) static inline void __i915_vma_unpin(struct i915_vma *vma) { - GEM_BUG_ON(!i915_vma_is_pinned(vma)); vma->flags--; } static inline void i915_vma_unpin(struct i915_vma *vma) { + GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); __i915_vma_unpin(vma); } diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 306c6b06b330bfc57f75a992c60468cb9d88e81c..17c4ae7e4e7c51e85de97cb8c803b280115de8cf 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) } /* Program the max register to clamp values > 1.0. */ + i = lut_size - 1; I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), drm_color_lut_extract(lut[i].red, 16)); I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 80e96f1f49d2d09deb59ec08e6ebe78ac07b8cf3..d3b3252a874252641af555d8797a72e20056a34b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, if (dev_priv->vbt.edp.low_vswing) { if (voltage == VOLTAGE_INFO_0_85V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); - return cnl_ddi_translations_dp_0_85V; + return cnl_ddi_translations_edp_0_85V; } else if (voltage == VOLTAGE_INFO_0_95V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); return cnl_ddi_translations_edp_0_95V; @@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); val &= ~LOADGEN_SELECT; - if (((rate < 600000) && (width == 4) && (ln >= 1)) || - ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { + if ((rate <= 600000 && width == 4 && ln >= 1) || + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dec9e58545a111b6f8d5aa9957c924d9e2ebcf16..cc484b56eeaa33213a0832f43770132a1b165df1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv) intel_finish_page_flip_cs(dev_priv, crtc->pipe); } -static void intel_update_primary_planes(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - for_each_crtc(dev, crtc) { - struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - if (plane_state->base.visible) { - trace_intel_update_plane(&plane->base, - to_intel_crtc(crtc)); - - plane->update_plane(plane, - to_intel_crtc_state(crtc->state), - 
plane_state); - } - } -} - static int __intel_display_resume(struct drm_device *dev, struct drm_atomic_state *state, @@ -3499,6 +3479,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state; int ret; + + /* reset doesn't touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + /* We have a modeset vs reset deadlock, defensively unbreak it. + * + * FIXME: We can do a _lot_ better, this is just a first iteration. + */ + i915_gem_set_wedged(dev_priv); + DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n"); + /* * Need mode_config.mutex so that we don't * trample ongoing ->detect() and whatnot. @@ -3512,12 +3505,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) drm_modeset_backoff(ctx); } - - /* reset doesn't touch the display, but flips might get nuked anyway, */ - if (!i915.force_reset_modeset_test && - !gpu_reset_clobbers_display(dev_priv)) - return; - /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. @@ -3547,6 +3534,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state = dev_priv->modeset_restore_state; int ret; + /* reset doesn't touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + if (!state) + goto unlock; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3558,22 +3553,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ if (!gpu_reset_clobbers_display(dev_priv)) { - if (!state) { - /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. 
- */ - intel_update_primary_planes(dev); - } else { - ret = __intel_display_resume(dev, state, ctx); + /* for testing only restore the display */ + ret = __intel_display_resume(dev, state, ctx); if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - } } else { /* * The display has been reset as well, @@ -3597,8 +3580,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); } - if (state) - drm_atomic_state_put(state); + drm_atomic_state_put(state); +unlock: drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); @@ -9117,6 +9100,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, u64 power_domain_mask; bool active; + if (INTEL_GEN(dev_priv) >= 9) { + intel_crtc_init_scalers(crtc, pipe_config); + + pipe_config->scaler_state.scaler_id = -1; + pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); + } + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; @@ -9145,13 +9135,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_GEN(dev_priv) >= 9) { - intel_crtc_init_scalers(crtc, pipe_config); - - pipe_config->scaler_state.scaler_id = -1; - pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); - } - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT_ULL(power_domain); @@ -9540,7 +9523,16 @@ static void i9xx_update_cursor(struct intel_plane *plane, * On some platforms writing CURCNTR first will also * cause CURPOS to be armed by the CURBASE write. * Without the CURCNTR write the CURPOS write would - * arm itself. + * arm itself. Thus we always start the full update + * with a CURCNTR write. + * + * On other platforms CURPOS always requires the + * CURBASE write to arm the update. Additionally + * a write to any of the cursor registers will cancel + * an already armed cursor update. Thus leaving out + * the CURBASE write after CURPOS could lead to a + * cursor that doesn't appear to move, or even change + * shape. Thus we always write CURBASE. * * CURCNTR and CUR_FBC_CTL are always * armed by the CURBASE write only.
@@ -9559,6 +9551,7 @@ static void i9xx_update_cursor(struct intel_plane *plane, plane->cursor.cntl = cntl; } else { I915_WRITE_FW(CURPOS(pipe), pos); + I915_WRITE_FW(CURBASE(pipe), base); } POSTING_READ_FW(CURBASE(pipe)); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 52d5b82790d9fa5461a3c75977b320bd4348d407..c17ed0e62b6757b678272056326062c74f71ee6b 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_SKYLAKE(dev_priv)) return true; - if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) + if (IS_KABYLAKE(dev_priv)) return true; return false; } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 52b3a1fd4059bab91a898f7f498a79af2c117a4b..57ef5833c4274958f3eed9f39b66a598dcfd1cd7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -63,7 +63,6 @@ enum { }; /* Logical Rings */ -void intel_logical_ring_stop(struct intel_engine_cs *engine); void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int logical_render_ring_init(struct intel_engine_cs *engine); int logical_xcs_ring_init(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 96c2cbd81869e7e55dedc8ce09f53938466bee70..593349be8b9dfce328d20ea3ca5d1b22e41da320 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, if (i915.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { - return panel->backlight.max - val; + return panel->backlight.max - val + panel->backlight.min; } return val; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 48ea0fca1f7239eb502b8269d57b95520ddb5708..40b224b44d1bec1bd08e970be65fe775f3a4c12e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && (plane_bytes_per_line / 512 < 1)) selected_result = method2; - else if ((ddb_allocation && ddb_allocation / - fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) + else if (ddb_allocation >= + fixed_16_16_to_u32_round_up(plane_blocks_per_line)) selected_result = min_fixed_16_16(method1, method2); else if (latency >= linetime_us) selected_result = min_fixed_16_16(method1, method2); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 627e2aa097665f7667f2cf6a834112dd28ac1a3a..8cdec455cf7d557a1f55cf090357b8be1d3f114b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->ring_mask = BIT(0); i915->engine[RCS] = mock_engine(i915, "mock"); if (!i915->engine[RCS]) - goto err_dependencies; + goto err_priorities; i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index b638d192ce5e046cc29e87459cc36e988e498ea8..99d39b2aefa675941d42c86b3c9b5a4d2cda937b 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -5,7 +5,7 @@ config DRM_MSM depends on ARCH_QCOM 
|| (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU - select QCOM_MDT_LOADER + select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b4b54f1c24bc1995a032493838d99e8e31dff9e9..f9eae03aa1dcaef072974d60216fb6b09ef81e66 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include "msm_gem.h" #include "msm_mmu.h" @@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) - static int zap_shader_load_mdt(struct device *dev, const char *fwname) { const struct firmware *fw; + struct device_node *np; + struct resource r; phys_addr_t mem_phys; ssize_t mem_size; void *mem_region = NULL; int ret; + if (!IS_ENABLED(CONFIG_ARCH_QCOM)) + return -EINVAL; + + np = of_get_child_by_name(dev->of_node, "zap-shader"); + if (!np) + return -ENODEV; + + np = of_parse_phandle(np, "memory-region", 0); + if (!np) + return -EINVAL; + + ret = of_address_to_resource(np, 0, &r); + if (ret) + return ret; + + mem_phys = r.start; + mem_size = resource_size(&r); + /* Request the MDT file for the firmware */ ret = request_firmware(&fw, fwname, dev); if (ret) { @@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) } /* Allocate memory for the firmware image */ - mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); if (!mem_region) { ret = -ENOMEM; goto out; @@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); out: + if (mem_region) + memunmap(mem_region); + release_firmware(fw); return ret; } -#else -static int zap_shader_load_mdt(struct device *dev, const char *fwname) -{ - return -ENODEV; -} -#endif static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) @@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, gpu->funcs->flush(gpu); } -struct a5xx_hwcg { +static const struct { u32 offset; u32 value; -}; - -static const struct a5xx_hwcg a530_hwcg[] = { +} a5xx_hwcg[] = { {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, @@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = { {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} }; -static const struct { - int (*test)(struct adreno_gpu *gpu); - const struct a5xx_hwcg *regs; - unsigned int count; -} a5xx_hwcg_regs[] = { - { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, -}; - -static void _a5xx_enable_hwcg(struct msm_gpu *gpu, - const struct a5xx_hwcg *regs, unsigned int count) +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { unsigned int i; - for (i = 0; i < count; i++) - gpu_write(gpu, regs[i].offset, regs[i].value); + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) + gpu_write(gpu, a5xx_hwcg[i].offset, + state ? 
a5xx_hwcg[i].value : 0); - gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); - gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); -} - -static void a5xx_enable_hwcg(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { - if (a5xx_hwcg_regs[i].test(adreno_gpu)) { - _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, - a5xx_hwcg_regs[i].count); - return; - } - } + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); } static int a5xx_me_init(struct msm_gpu *gpu) @@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu) return ret; } -/* Set up a child device to "own" the zap shader */ -static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev) -{ - struct device_node *node; - int ret; - - if (dev->parent) - return 0; - - /* Find the sub-node for the zap shader */ - node = of_get_child_by_name(parent->of_node, "zap-shader"); - if (!node) { - DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n"); - return -ENODEV; - } - - dev->parent = parent; - dev->of_node = node; - dev_set_name(dev, "adreno_zap_shader"); - - ret = device_register(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n"); - goto out; - } - - ret = of_reserved_mem_device_init(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n"); - device_unregister(dev); - } - -out: - if (ret) - dev->parent = NULL; - - return ret; -} - static int a5xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; @@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) return -ENODEV; } - ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); - - if (!ret) - ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev, - adreno_gpu->info->zapfw); + ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw); loaded = !ret; @@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); /* Enable HWCG */ - a5xx_enable_hwcg(gpu); + a5xx_set_hwcg(gpu, true); gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); @@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu) DBG("%s", gpu->name); - if (a5xx_gpu->zap_dev.parent) - device_unregister(&a5xx_gpu->zap_dev); - if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); @@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, - 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, - 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, - 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, - 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, - 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, - 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, - 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, - 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, - 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, - 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, - 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, - 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, - 0xE150, 0xE187, 0xE1A0, 
0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, - 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, - 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, - 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, - 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, - 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, - 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, - 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, - 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, - 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, - 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, - 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, - ~0 + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, + 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F, + 0xB9A0, 0xB9BF, ~0 }; static void a5xx_dump(struct msm_gpu *gpu) @@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + + /* + * Temporarily disable hardware clock gating before going into + * adreno_show to avoid issues while reading the registers + */ + a5xx_set_hwcg(gpu, false); adreno_show(gpu, m); + a5xx_set_hwcg(gpu, true); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6638bc85645dbad4adf3689bd7d9bae9441173c2..1137092241d593c34e4607e3c723acfb74861972 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -36,8 +36,6 @@ struct a5xx_gpu { uint32_t gpmu_dwords; uint32_t lm_leakage; - - struct device zap_dev; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, } bool a5xx_idle(struct msm_gpu *gpu); +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); #endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f1ab2703674a2f5d4f533828bb6c8b49df24f571..7414c6bbd582e9597e502305885f0dec909859be 
100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) *value = adreno_gpu->base.fast_rate; return 0; case MSM_PARAM_TIMESTAMP: - if (adreno_gpu->funcs->get_timestamp) - return adreno_gpu->funcs->get_timestamp(gpu, value); + if (adreno_gpu->funcs->get_timestamp) { + int ret; + + pm_runtime_get_sync(&gpu->pdev->dev); + ret = adreno_gpu->funcs->get_timestamp(gpu, value); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + + return ret; + } return -EINVAL; default: DBG("%s: invalid param: %u", gpu->name, param); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 9e9c5696bc03547b813ecf2ae56c535265e64bcd..c7b612c3d7717a02d8d64be21dea68f183163917 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + ret = dsi_calc_clk_rate(msm_host); + if (ret) { + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); + return; + } clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; clk_req->escclk_rate = msm_host->esc_clk_rate; @@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - int ret; if (msm_host->mode) { drm_mode_destroy(msm_host->dev, msm_host->mode); @@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, return -ENOMEM; } - ret = dsi_calc_clk_rate(msm_host); - if (ret) { - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index cb5415d6c04b7ab6e1e80503d26b32891a934dee..735a87a699fafafb99b179752e0e7f3c19491389 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_ctl *ctl = mdp5_cstate->ctl; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; - enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; int i, plane_cnt = 0; bool bg_alpha_enabled = false; u32 mixer_op_mode = 0; @@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); cursor_enable = false; + mdp5_enable(mdp5_kms); goto set_cursor; } @@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); @@ -804,6 +807,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, crtc_flush(crtc, flush_mask); end: + mdp5_disable(mdp5_kms); if (old_bo) { drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); /* enable vblank to complete cursor work: */ @@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); + 
mdp5_enable(mdp5_kms); + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | @@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) crtc_flush(crtc, flush_mask); + mdp5_disable(mdp5_kms); + return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 97f3294fbfc6f9d26f453dac36d5dbb3cb00e93a..70bef51245af89d5bbb292f4495f2fd79dcc9e33 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_disable(encoder); + mdp5_cmd_encoder_enable(encoder); else mdp5_vid_encoder_enable(encoder); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5d13fa5381ee37705a0c282bf023b4782fc19268..1c603aef3c59cdff286ce38e84c3e6a4745dd0c2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp, const char *name, bool mandatory) { struct device *dev = &pdev->dev; - struct clk *clk = devm_clk_get(dev, name); + struct clk *clk = msm_clk_get(pdev, name); if (IS_ERR(clk) && mandatory) { dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); return PTR_ERR(clk); @@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) } /* mandatory clocks: */ - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); if (ret) goto fail; /* optional clocks: */ - get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); /* we need to set a default rate before enabling. 
Set a safe * rate first, then figure out hw revision, and then set a diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index fe3a4de1a4331ff86f0b4f0cc85a48b208bca3b5..61f39c86dd09e53a5860ce880b0f7955e5008c05 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, struct mdp5_hw_pipe *right_hwpipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - struct phase_step step = { 0 }; - struct pixel_ext pe = { 0 }; + struct phase_step step = { { 0 } }; + struct pixel_ext pe = { { 0 } }; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; unsigned int rotation; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 65f35544c1ec8859018c2afb713fa5120fc43272..a0c60e738db8d7be5e841311832e82f0b45bd7fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj, struct page **pages; vma = add_vma(obj, aspace); - if (IS_ERR(vma)) - return PTR_ERR(vma); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto unlock; + } pages = get_pages(obj); if (IS_ERR(pages)) { @@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, fail: del_vma(vma); - +unlock: mutex_unlock(&msm_obj->lock); return ret; } @@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, if (use_vram) { struct msm_gem_vma *vma; struct page **pages; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + mutex_lock(&msm_obj->lock); vma = add_vma(obj, NULL); + mutex_unlock(&msm_obj->lock); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6bfca74701410050b20d1a136ae5cdc4454b1306..8a75c0bd8a78b1481e30fdab63f2d14bfc64536d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) { struct msm_gem_submit *submit; - uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + - (nr_cmds * sizeof(submit->cmd[0])); + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + + ((u64)nr_cmds * sizeof(submit->cmd[0])); if (sz > SIZE_MAX) return NULL; @@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit); if (ret) goto out; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index c36321bc87148864db09bd0af4fc38a39cb182f9..d34e331554f3903eaded86cf12fdd4a4ef24507a 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -42,7 +42,7 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt) { - if (!vma->iova) + if (!aspace || !vma->iova) return; if (aspace->mmu) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 147b22163f9f6839419ebe3051f492f20fd9e0eb..dab78c660dd63d2c938d6b33524c3aa5de048633 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg 
*msg) return -ENODEV; if (WARN_ON(msg->size > 16)) return -E2BIG; - if (msg->size == 0) - return msg->size; ret = nvkm_i2c_aux_acquire(aux); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 8d1df5678eaaa7291eaf64ce71d39d79cccd60ca..f362c9fa8b3bb42a10dd2ea63c378f1b5d8255fb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; - struct drm_crtc *crtc; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) drm_crtc_force_disable_all(dev); } - /* Make sure that drm and hw vblank irqs get properly disabled. */ - drm_for_each_crtc(crtc, dev) - drm_crtc_vblank_off(crtc); - /* disable flip completion events */ nvif_notify_put(&drm->flip); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index e3132a2ce34d60acb9eb75cdf83ec1506e8f874b..2bc0dc9852144cfe0825b5da075333d9e85c5b9c 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) drm_mode_connector_attach_encoder(connector, encoder); if (dcbe->type == DCB_OUTPUT_DP) { + struct nv50_disp *disp = nv50_disp(encoder->dev); struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index); if (aux) { - nv_encoder->i2c = &nv_connector->aux.ddc; + if (disp->disp->oclass < GF110_DISP) { + /* HW has no support for address-only + * transactions, so we're required to + * use custom I2C-over-AUX code. + */ + nv_encoder->i2c = &aux->i2c; + } else { + nv_encoder->i2c = &nv_connector->aux.ddc; + } nv_encoder->aux = aux; } /*TODO: Use DP Info Table to check for support. */ - if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { + if (disp->disp->oclass >= GF110_DISP) { ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, nv_connector->base.base.id, &nv_encoder->dp.mstm); @@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, asyh->clr.mask, asyh->set.mask); + if (crtc_state->active && !asyh->state.active) + drm_crtc_vblank_off(crtc); if (asyh->clr.mask) { nv50_head_flush_clr(head, asyh, atom->flush_disable); @@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) nv50_head_flush_set(head, asyh); interlock_core = 1; } - } - for_each_crtc_in_state(state, crtc, crtc_state, i) { - if (crtc->state->event) - drm_crtc_vblank_get(crtc); + if (asyh->state.active) { + if (!crtc_state->active) + drm_crtc_vblank_on(crtc); + if (asyh->state.event) + drm_crtc_vblank_get(crtc); + } } /* Update plane(s). 
*/ @@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) if (crtc->state->event) { unsigned long flags; /* Get correct count/ts if racing with vblank irq */ - drm_accurate_vblank_count(crtc); + if (crtc->state->active) + drm_accurate_vblank_count(crtc); spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; - drm_crtc_vblank_put(crtc); + if (crtc->state->active) + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index c7c84d34d97e20308b926077e7ed4ce6e8d77281..88582af8bd89745b7c78332cf415dd0bc9f24e23 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) /* Create output path objects for each VBIOS display path. */ i = -1; while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { + if (ver < 0x40) /* No support for chipsets prior to NV50. */ + break; if (dcbE.type == DCB_OUTPUT_UNUSED) continue; if (dcbE.type == DCB_OUTPUT_EOL) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index a24312fb0228ff1f1053bca27f33c5702d2dc2ac..a1e8bf48b778430d45d46fb9e9de453fb897cd2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h @@ -22,6 +22,7 @@ struct nvkm_ior { unsigned proto_evo:4; enum nvkm_ior_proto { CRT, + TV, TMDS, LVDS, DP, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index 19c635663399a2442b670479df18621119595043..6ea19466f43622565052d8600424c948c9705343 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -22,7 +22,7 @@ struct nv50_disp { u8 type[3]; } pior; - struct nv50_disp_chan *chan[17]; + struct nv50_disp_chan *chan[21]; }; void nv50_disp_super_1(struct nv50_disp *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 85aff85394ac29893b25180d62a6994af266c429..be9e7f8c3b2392fa96643f91391e606196ad7a67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) case 0: switch (outp->info.type) { case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; + case DCB_OUTPUT_TV : *type = DAC; return TV; case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; case DCB_OUTPUT_DP : *type = SOR; return DP; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e710faa3ac64f70ac970ae69826c9..6d8f21290aa20342c1d50f73aa836feab1266d8c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) if (bar->bar[0].mem) { addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); + nvkm_wr32(device, 0x001714, 0x80000000 | addr); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 
48f01e40b8fcc948126cbc1601b0217c034e7b90..b768e66a472beef8da6f871b6da6bdb471dc2a05 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild @@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o nvkm-y += nvkm/subdev/i2c/aux.o nvkm-y += nvkm/subdev/i2c/auxg94.o +nvkm-y += nvkm/subdev/i2c/auxgf119.o nvkm-y += nvkm/subdev/i2c/auxgm200.o nvkm-y += nvkm/subdev/i2c/anx9805.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index d172e42dd2280682d58c3907ed882bae03fa0a2f..4c1f547da463afff3dee772446f92f13b2f4043e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -117,6 +117,10 @@ int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { + if (!*size && !aux->func->address_only) { + AUX_ERR(aux, "address-only transaction dropped"); + return -ENOSYS; + } return aux->func->xfer(aux, retry, type, addr, data, size); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 27a4a39c87f0044577a6b72bd4e95c6e4fc1437b..9587ab456d9eaa882eb3e9e9749993dc8fe62e17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h @@ -3,6 +3,7 @@ #include "pad.h" struct nvkm_i2c_aux_func { + bool address_only; int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, @@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **); int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); +int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, + int, u8, struct nvkm_i2c_aux **); + int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *); +int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); #define AUX_MSG(b,l,f,a...) do { \ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index ab8cb196c34e73202c288c7dc24a14a88a18b78f..c8ab1b5741a3e3c0e93a9943c0b3811b8c646ea6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux) return 0; } -static int +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { @@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00e4e4 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00e4e0 + base, addr); /* (maybe) retry transaction a number of times on failure... */ @@ -160,14 +160,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, return ret < 0 ? 
ret : (stat & 0x000f0000) >> 16; } -static const struct nvkm_i2c_aux_func -g94_i2c_aux_func = { - .xfer = g94_i2c_aux_xfer, -}; - int -g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, - struct nvkm_i2c_aux **paux) +g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func, + struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) { struct g94_i2c_aux *aux; @@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, return -ENOMEM; *paux = &aux->base; - nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); + nvkm_i2c_aux_ctor(func, pad, index, &aux->base); aux->ch = drive; aux->base.intr = 1 << aux->ch; return 0; } + +static const struct nvkm_i2c_aux_func +g94_i2c_aux = { + .xfer = g94_i2c_aux_xfer, +}; + +int +g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c new file mode 100644 index 0000000000000000000000000000000000000000..dab40cd8fe3a9659a8067187645cbf51454d861b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c @@ -0,0 +1,35 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "aux.h" + +static const struct nvkm_i2c_aux_func +gf119_i2c_aux = { + .address_only = true, + .xfer = g94_i2c_aux_xfer, +}; + +int +gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index ee091fa79628e1da1864d230731d4dd8e94e5cfa..7ef60895f43a7808229a8d72e27b2b54d67f3d48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00d954 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00d950 + base, addr); /* (maybe) retry transaction a number of times on failure... 
*/ @@ -162,6 +162,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, static const struct nvkm_i2c_aux_func gm200_i2c_aux_func = { + .address_only = true, .xfer = gm200_i2c_aux_xfer, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index d53212f1aa52731dc7929d2b89637d6a5d1c51b9..3bc4d03100767da585ad9d6dbd95483be3174403 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c @@ -28,7 +28,7 @@ static const struct nvkm_i2c_pad_func gf119_i2c_pad_s_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, .mode = g94_i2c_pad_mode, }; @@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad) static const struct nvkm_i2c_pad_func gf119_i2c_pad_x_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, }; int diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 50c41c0a50ef3f989553bc091193b57288a28914..dcc539ba85d65da734d9a94a77bfc6b09d62b05e 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -5,6 +5,10 @@ config DRM_ROCKCHIP select DRM_KMS_HELPER select DRM_PANEL select VIDEOMODE_HELPERS + select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP + select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI + select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC help Choose this option if you have a Rockchip soc chipset. This driver provides kernel mode setting and buffer @@ -12,10 +16,10 @@ config DRM_ROCKCHIP 2D or 3D acceleration; acceleration is performed by other IP found on the SoC. +if DRM_ROCKCHIP + config ROCKCHIP_ANALOGIX_DP bool "Rockchip specific extensions for Analogix DP driver" - depends on DRM_ROCKCHIP - select DRM_ANALOGIX_DP help This selects support for Rockchip SoC specific extensions for the Analogix Core DP driver. If you want to enable DP @@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" - depends on DRM_ROCKCHIP - depends on EXTCON - select SND_SOC_HDMI_CODEC if SND_SOC + depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) help This selects support for Rockchip SoC specific extensions for the cdn DP driver. If you want to enable Dp on @@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP config ROCKCHIP_DW_HDMI bool "Rockchip specific extensions for Synopsys DW HDMI" - depends on DRM_ROCKCHIP - select DRM_DW_HDMI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare HDMI driver. If you want to @@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" - depends on DRM_ROCKCHIP - select DRM_MIPI_DSI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare HDMI driver. If you want to @@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI config ROCKCHIP_INNO_HDMI bool "Rockchip specific extensions for Innosilicon HDMI" - depends on DRM_ROCKCHIP help This selects support for Rockchip SoC specific extensions for the Innosilicon HDMI driver. If you want to enable HDMI on RK3036 based SoC, you should select this option. 
+ +endif diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d450332c2fd79fd8c1052aca84d7d15c5db3ef8..2900f1410d959bc9f4002a6f0c38a3aae5a9c59a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop) static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); - int ret; + int ret, i; ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { @@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc) } memcpy(vop->regs, vop->regsbak, vop->len); + /* + * We need to make sure that all windows are disabled before we + * enable the crtc. Otherwise we might try to scan from a destroyed + * buffer later. + */ + for (i = 0; i < vop->data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win = vop_win->data; + + spin_lock(&vop->reg_lock); + VOP_WIN_SET(vop, win, enable, 0); + spin_unlock(&vop->reg_lock); + } + vop_cfg_done(vop); /* @@ -566,28 +580,11 @@ static int vop_enable(struct drm_crtc *crtc) static void vop_crtc_disable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); - int i; WARN_ON(vop->event); rockchip_drm_psr_deactivate(&vop->crtc); - /* - * We need to make sure that all windows are disabled before we - * disable that crtc. Otherwise we might try to scan from a destroyed - * buffer later. - */ - for (i = 0; i < vop->data->win_size; i++) { - struct vop_win *vop_win = &vop->win[i]; - const struct vop_win_data *win = vop_win->data; - - spin_lock(&vop->reg_lock); - VOP_WIN_SET(vop, win, enable, 0); - spin_unlock(&vop->reg_lock); - } - - vop_cfg_done(vop); - drm_crtc_vblank_off(crtc); /* @@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
*/ - if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) + if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { + DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; + } return 0; } @@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, format); - VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); + VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); if (is_yuv_support(fb->format->format)) { int hsub = drm_format_horz_chroma_subsampling(fb->format->format); @@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, offset += (src->y1 >> 16) * fb->pitches[1] / vsub; dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; - VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2); + VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); VOP_WIN_SET(vop, win, uv_mst, dma_addr); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 9979fd0c22821d7efa3d7054468e0914619e0692..27eefbfcf3d05f3ad03e3b9f6fe2ae56f68e7684 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h, act_height = (src_h + vskiplines - 1) / vskiplines; + if (act_height == dst_h) + return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines; + return GET_SCL_FT_BILI_DN(act_height, dst_h); } diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 2c4817fb08902427df09223b7ab4046241fc6c77..8fe5b184b4e8a945d68c201a3a29be531594e2d2 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -7,7 +7,6 @@ config DRM_STM select DRM_PANEL select VIDEOMODE_HELPERS select FB_PROVIDE_GET_FB_UNMAPPED_AREA - default y help Enable support for the on-chip display controller on diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 35bf781e418e339eae86760c04697b9a1b128d25..c7056322211cc80fa5b42742ffcd496973d19ed6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -30,49 +30,49 @@ #include #include -static struct ttm_place vram_placement_flags = { +static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED }; -static struct ttm_place vram_ne_placement_flags = { +static const struct ttm_place vram_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place sys_placement_flags = { +static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED }; -static struct ttm_place sys_ne_placement_flags = { +static const struct ttm_place sys_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place gmr_placement_flags = { +static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED }; -static struct ttm_place gmr_ne_placement_flags = { +static const struct ttm_place gmr_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place mob_placement_flags = { +static const struct 
ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }; -static struct ttm_place mob_ne_placement_flags = { +static const struct ttm_place mob_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT @@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = { .busy_placement = &vram_placement_flags }; -static struct ttm_place vram_gmr_placement_flags[] = { +static const struct ttm_place vram_gmr_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = { } }; -static struct ttm_place gmr_vram_placement_flags[] = { +static const struct ttm_place gmr_vram_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = { .busy_placement = &gmr_placement_flags }; -static struct ttm_place vram_gmr_ne_placement_flags[] = { +static const struct ttm_place vram_gmr_ne_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = { .busy_placement = &sys_ne_placement_flags }; -static struct ttm_place evictable_placement_flags[] = { +static const struct ttm_place evictable_placement_flags[] = { { .fpfn = 0, .lpfn = 0, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 99a7f4ab7d97edf1652aae7a04ec0f8cf51c9e6f..86178796de6c354a32cf1ef35637aa459d8cb7b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, if (ret) return ret; - header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, - &header->handle); + header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, + &header->handle); if (!header->cb_header) { ret = -ENOMEM; goto out_no_cb_header; @@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, cb_hdr = header->cb_header; offset = header->node.start << PAGE_SHIFT; header->cmd = man->map + offset; - memset(cb_hdr, 0, sizeof(*cb_hdr)); if (man->using_mob) { cb_hdr->flags = SVGA_CB_FLAG_MOB; cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; @@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) return -ENOMEM; - dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, - &header->handle); + dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, + &header->handle); if (!dheader) return -ENOMEM; @@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, cb_hdr = &dheader->cb_header; header->cb_header = cb_hdr; header->cmd = dheader->cmd; - memset(dheader, 0, sizeof(*dheader)); cb_hdr->status = SVGA_CB_STATUS_NONE; cb_hdr->flags = SVGA_CB_FLAG_NONE; cb_hdr->ptr.pa = (u64)header->handle + diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 1f013d45c9e9a3959dfa19300ba76fc37820592a..36c7b6c839c0dd230752cba96c348b40e2c65c65 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, int ret; cres = kzalloc(sizeof(*cres), GFP_KERNEL); - if (unlikely(cres == NULL)) + if (unlikely(!cres)) return -ENOMEM; cres->hash.key = user_key | (res_type << 24); @@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) int ret; man = kzalloc(sizeof(*man), GFP_KERNEL); - if (man 
== NULL) + if (!man) return ERR_PTR(-ENOMEM); man->dev_priv = dev_priv; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index bcc6d4136c878ef159e0aeefcf00f294fc565ef2..4212b3e673bce2df55c5c6b71894a97fbe9b7c5f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i); - if (unlikely(uctx->cotables[i] == NULL)) { - ret = -ENOMEM; + if (unlikely(IS_ERR(uctx->cotables[i]))) { + ret = PTR_ERR(uctx->cotables[i]); goto out_cotables; } } @@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, } ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(ctx == NULL)) { + if (unlikely(!ctx)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_context_size); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 6c026d75c18043a9e8f6926e21f3bef917038f8a..d87861bbe971b1d474e1ab2b43cd2097bd2c32de 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, return ERR_PTR(ret); vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); - if (unlikely(vcotbl == NULL)) { + if (unlikely(!vcotbl)) { ret = -ENOMEM; goto out_no_alloc; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4a641555b960b8fc654c6ab89c428e783a61aad4..4436d53ae16c7da1e15769e446015c69f6495995 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_AUTH | DRM_RENDER_ALLOW), }; -static struct pci_device_id vmw_pci_id_list[] = { +static const struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; @@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) char host_log[100] = {0}; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (unlikely(dev_priv == NULL)) { + if (unlikely(!dev_priv)) { DRM_ERROR("Failed allocating a device private struct.\n"); return -ENOMEM; } @@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) int ret = -ENOMEM; vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); - if (unlikely(vmw_fp == NULL)) + if (unlikely(!vmw_fp)) return ret; vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); @@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev, struct vmw_master *vmaster; vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); - if (unlikely(vmaster == NULL)) + if (unlikely(!vmaster)) return -ENOMEM; vmw_master_init(vmaster); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c7b53d987f06c923c53eaafb0fccdd9631ba72a7..2cfb3c93f42ab430be0b36ae78c40897ef75cc4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, } node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(node == NULL)) { + if (unlikely(!node)) { DRM_ERROR("Failed to allocate a resource validation " "entry.\n"); return -ENOMEM; @@ -452,7 +452,7 @@ static int 
vmw_resource_relocation_add(struct list_head *list, struct vmw_resource_relocation *rel; rel = kmalloc(sizeof(*rel), GFP_KERNEL); - if (unlikely(rel == NULL)) { + if (unlikely(!rel)) { DRM_ERROR("Failed to allocate a resource relocation.\n"); return -ENOMEM; } @@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? : -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, @@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, /** * vmw_cmd_dx_ia_set_vertex_buffers - Validate an - * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. + * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 6b2708b4eafe84c832d41a10f75c2be9e65fc0b9..b8bc5bc7de7e0d4616cbbfe6fbee1d309aa55360 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) { struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); - if (unlikely(fman == NULL)) + if (unlikely(!fman)) return NULL; fman->dev_priv = dev_priv; @@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) + if (unlikely(!fence)) return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, @@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv, return ret; ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); - if (unlikely(ufence == NULL)) { + if (unlikely(!ufence)) { ret = -ENOMEM; goto out_no_object; } @@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, struct vmw_fence_manager *fman = fman_from_fence(fence); eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); - if (unlikely(eaction == NULL)) + if (unlikely(!eaction)) return -ENOMEM; eaction->event = event; @@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, int ret; event = kzalloc(sizeof(*event), GFP_KERNEL); - if (unlikely(event == NULL)) { + if (unlikely(!event)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; goto out_no_space; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c1900f4390a41efbf5a613fb6f451d391e336fed..d2b03d4a3c867d11a0167f3ff7ba85b246289b88 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); - if (unlikely(gman == NULL)) + if (unlikely(!gman)) return -ENOMEM; spin_lock_init(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3d94ea67a825e1cf37d9075035283236ebff69fa..61e06f0e8cd3b43890e807a2ab2bb9037ac95e99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; + + if (plane->fb) { + hotspot_x += plane->fb->hot_x; + hotspot_y += plane->fb->hot_y; + } + du->cursor_surface = vps->surf; 
du->cursor_dmabuf = vps->dmabuf; @@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, du->cursor_y + hotspot_y); + + du->core_hotspot_x = hotspot_x - du->hotspot_x; + du->core_hotspot_y = hotspot_y - du->hotspot_y; } else { DRM_ERROR("Failed to update cursor image\n"); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 941bcfd131ff7a169472b0b92c62cf7a40e01bd9..b17f08fc50d3fa2306505bedf33d6eea1ec7e323 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv) if (dev_priv->has_dx) { *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); } else { *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); @@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) { struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); - if (unlikely(mob == NULL)) + if (unlikely(!mob)) return NULL; mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6063c9636d4a08f0b908eb54531b8f2efb818bd8..97000996b8dc5122ba5d8109af5b14930b6f4e60 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, reply_len = ebx; reply = kzalloc(reply_len + 1, GFP_KERNEL); - if (reply == NULL) { + if (!reply) { DRM_ERROR("Cannot allocate memory for reply\n"); return -ENOMEM; } @@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param, msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); return -ENOMEM; } @@ -400,7 +400,7 @@ int vmw_host_log(const char *log) msg_len = strlen(log) + strlen("log ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory for log message\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7d591f653dfa323e5f51f0bfeca58dac44bf5fa4..a96f90f017d16072423a95302779e54444339cd4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, int ret; user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(user_bo == NULL)) { + if (unlikely(!user_bo)) { DRM_ERROR("Failed to allocate a buffer.\n"); return -ENOMEM; } @@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, } backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(backup == NULL)) + if (unlikely(!backup)) return -ENOMEM; ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 68f135c5b0d8e0b81dff7dd039ea1a032683f89b..9b832f13681370e38cf3345f1eb6f5415f715c71 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -751,7 +751,7 @@ static int 
vmw_user_shader_alloc(struct vmw_private *dev_priv, } ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); - if (unlikely(ushader == NULL)) { + if (unlikely(!ushader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_shader_size); ret = -ENOMEM; @@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, } shader = kzalloc(sizeof(*shader), GFP_KERNEL); - if (unlikely(shader == NULL)) { + if (unlikely(!shader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_size); ret = -ENOMEM; @@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, /* Allocate and pin a DMA buffer */ buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (unlikely(buf == NULL)) + if (unlikely(!buf)) return -ENOMEM; ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 50be1f034f9efa701f2c6feda57fe28d8cf6d596..5284e8d2f7ba4b0f3c45972c3446800a8debceeb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) * something arbitrarily large and we will reject any layout * that doesn't fit prim_bb_mem later */ - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } vmw_kms_create_implicit_placement_property(dev_priv, false); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 2c58a390123a1a7c3f8a4e6fd6f11a34f3340fbe..7782725141648360cedd89f3363155da5f7c8d9c 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev) return -ENOMEM; err = iommu_attach_device(host->domain, &pdev->dev); - if (err) + if (err == -ENODEV) { + iommu_domain_free(host->domain); + host->domain = NULL; + goto skip_iommu; + } else if (err) { goto fail_free_domain; + } geometry = &host->domain->geometry; @@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev) host->iova_end = geometry->aperture_end; } +skip_iommu: err = host1x_channel_list_init(&host->channel_list, host->info->nb_channels); if (err) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6fd01a692197af4e70c9e1bb4bfd0bd8ffb4f348..9017dcc14502d7236ff48a03457cb67a7f9d0a5e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = { #if IS_ENABLED(CONFIG_HID_ORTEK) { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, #endif #if IS_ENABLED(CONFIG_HID_PANTHERLORD) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 3d911bfd91cf1a8ea67c504f344ecba32acc603e..c9ba4c6db74ca076d0e26426656d238ac878088e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -824,6 +824,7 @@ #define USB_VENDOR_ID_ORTEK 0x05a4 #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 +#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 #define USB_VENDOR_ID_PLANTRONICS 0x047f diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c index 6620f15fec228a21ccf6f1b12a5e7dfd7b73046d..8783a064cdcf43b0752973a4693d72ddca4fe4fa 
100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c @@ -5,6 +5,7 @@ * * Ortek PKB-1700 * Ortek WKB-2000 + * iHome IMAC-A210S * Skycable wireless presenter * * Copyright (c) 2010 Johnathon Harris @@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { - hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n"); + hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n"); rdesc[55] = 0x92; } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { - hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); + hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n"); rdesc[53] = 0x65; } return rdesc; @@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, static const struct hid_device_id ortek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, { } }; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 76013eb5cb7faedbf5ec6ca62dca6181afc5919b..c008847e0b20a2accb00451b10fb1c648f67925b 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + set_bit(HID_OPENED, &usbhid->iofl); + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) return 0; res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ - if (res < 0) + if (res < 0) { + clear_bit(HID_OPENED, &usbhid->iofl); return -EIO; + } usbhid->intf->needs_remote_wakeup = 1; set_bit(HID_RESUME_RUNNING, &usbhid->iofl); - set_bit(HID_OPENED, &usbhid->iofl); set_bit(HID_IN_POLLING, &usbhid->iofl); res = hid_start_in(hid); @@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return; - /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing * with hid_start_in(). 
*/ spin_lock_irq(&usbhid->lock); - clear_bit(HID_IN_POLLING, &usbhid->iofl); clear_bit(HID_OPENED, &usbhid->iofl); + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) + clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) + return; + hid_cancel_delayed_stuff(usbhid); usb_kill_urb(usbhid->urbin); usbhid->intf->needs_remote_wakeup = 0; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 1006b230b236f1c977d1c1a7d9bf32a268b7e263..65fa29591d21641fd1bd4e4484d8daeef56f9bdb 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -983,7 +983,7 @@ config I2C_UNIPHIER_F config I2C_VERSATILE tristate "ARM Versatile/Realview I2C bus support" - depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST + depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST select I2C_ALGOBIT help Say yes if you want to support the I2C serial bus on ARMs Versatile diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2ea6d0d25a01a33069bce293ab6858947a0cb01f..143a8fd582b4aeb905ea25b416261a5c1f44a6e9 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) } acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); + /* Some broken DSTDs use 1MiHz instead of 1MHz */ + if (acpi_speed == 1048576) + acpi_speed = 1000000; /* * Find bus speed from the "clock-frequency" device property, ACPI * or by using fast mode if neither is set. @@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (dev->clk_freq != 100000 && dev->clk_freq != 400000 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { dev_err(&pdev->dev, - "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + dev->clk_freq); ret = -EINVAL; goto exit_reset; } diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 4842ec3a5451ed479446fc13352405aca45697d2..a9126b3cda61bc95f6a9d1282821ab7552484534 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); } +const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client) +{ + if (!(client && matches)) + return NULL; + + return acpi_match_device(matches, &client->dev); +} + static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); -static int i2c_acpi_match_adapter(struct device *dev, void *data) +static int i2c_acpi_find_match_adapter(struct device *dev, void *data) { struct i2c_adapter *adapter = i2c_verify_adapter(dev); @@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data) return ACPI_HANDLE(dev) == (acpi_handle)data; } -static int i2c_acpi_match_device(struct device *dev, void *data) +static int i2c_acpi_find_match_device(struct device *dev, void *data) { return ACPI_COMPANION(dev) == data; } @@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, handle, - 
i2c_acpi_match_adapter); + i2c_acpi_find_match_adapter); return dev ? i2c_verify_adapter(dev) : NULL; } @@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { struct device *dev; - dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); + dev = bus_find_device(&i2c_bus_type, NULL, adev, + i2c_acpi_find_match_device); return dev ? i2c_verify_client(dev) : NULL; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c89dac7fd2e7b793217119f2ccee849cf75ebcfe..12822a4b8f8f09b5c080f7338a89e0ea00cbb4f2 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev) * Tree match table entry is supplied for the probing device. */ if (!driver->id_table && + !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && !i2c_of_match_device(dev->driver->of_match_table, client)) return -ENODEV; diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 3b63f5e5b89cbda662a580c387bfa2d23e2ebcef..3d3d9bf02101bddf06fc6597f107cc3ac8e3beb8 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags); int i2c_check_7bit_addr_validity_strict(unsigned short addr); #ifdef CONFIG_ACPI +const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client); void i2c_acpi_register_devices(struct i2c_adapter *adap); #else /* CONFIG_ACPI */ static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } +static inline const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client) +{ + return NULL; +} #endif /* CONFIG_ACPI */ extern struct notifier_block i2c_acpi_notifier; diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 2c64d0e0740f0db0c4427af9d6602bc8aaa0ea24..17121329bb793a615e8969a15327e3f07035cdbb 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL different sets of pins at run-time. This driver can also be built as a module. If so, the module will be - called pinctrl-i2cmux. + called i2c-mux-pinctrl. 
config I2C_MUX_REG tristate "Register-based I2C multiplexer" diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 6b5d3be283c4e7e00f72bb32e550b52ab06e1526..807299dd45ebf0663fbc97b3831bd8f92148ec20 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -193,7 +193,6 @@ struct bmc150_accel_data { struct regmap *regmap; int irq; struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; - atomic_t active_intr; struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; struct mutex mutex; u8 fifo_mode, watermark; @@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, goto out_fix_power_state; } - if (state) - atomic_inc(&data->active_intr); - else - atomic_dec(&data->active_intr); - return 0; out_fix_power_state: @@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev) struct bmc150_accel_data *data = iio_priv(indio_dev); mutex_lock(&data->mutex); - if (atomic_read(&data->active_intr)) - bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); + bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); bmc150_accel_fifo_set_mode(data); mutex_unlock(&data->mutex); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 07d1489cd457a6b5445b8b3ba35dad95b1792acc..e44f62bf9caa9f1a45c6699875b885d497bb0a7a 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .en_mask = 0x08, }, }, + .sim = { + .addr = 0x24, + .value = BIT(0), + }, .multi_read_bit = false, .bootime = 2, }, @@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = true, .bootime = 2, /* guess */ }, @@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(7), + }, .multi_read_bit = false, .bootime = 2, /* guess */ }, @@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr_ihl = 0x22, .mask_ihl = 0x80, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = false, .bootime = 2, }, @@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c 
index e0ea411a0b2df9563085c70552086946843ba2ca..c02b23d675cbc1540ec47769515da714c784cf67 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -22,6 +22,7 @@ #include #include +#include #define ASPEED_RESOLUTION_BITS 10 #define ASPEED_CLOCKS_PER_SAMPLE 12 @@ -38,11 +39,17 @@ #define ASPEED_ENGINE_ENABLE BIT(0) +#define ASPEED_ADC_CTRL_INIT_RDY BIT(8) + +#define ASPEED_ADC_INIT_POLLING_TIME 500 +#define ASPEED_ADC_INIT_TIMEOUT 500000 + struct aspeed_adc_model_data { const char *model_name; unsigned int min_sampling_rate; // Hz unsigned int max_sampling_rate; // Hz unsigned int vref_voltage; // mV + bool wait_init_sequence; }; struct aspeed_adc_data { @@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev) goto scaler_error; } + model_data = of_device_get_match_data(&pdev->dev); + + if (model_data->wait_init_sequence) { + /* Enable engine in normal mode. */ + writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, + data->base + ASPEED_REG_ENGINE_CONTROL); + + /* Wait for initial sequence complete. */ + ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, + adc_engine_control_reg_val, + adc_engine_control_reg_val & + ASPEED_ADC_CTRL_INIT_RDY, + ASPEED_ADC_INIT_POLLING_TIME, + ASPEED_ADC_INIT_TIMEOUT); + if (ret) + goto scaler_error; + } + /* Start all channels in normal mode. */ ret = clk_prepare_enable(data->clk_scaler->clk); if (ret) @@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = { .vref_voltage = 1800, // mV .min_sampling_rate = 1, .max_sampling_rate = 1000000, + .wait_init_sequence = true, }; static const struct of_device_id aspeed_adc_matches[] = { diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 64799ad7ebad02a797607470678aa500842ed99f..462a99c13e7a210a74f1d9b2a9c87de7ebc3fad6 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -28,6 +28,8 @@ #include #define AXP288_ADC_EN_MASK 0xF1 +#define AXP288_ADC_TS_PIN_GPADC 0xF2 +#define AXP288_ADC_TS_PIN_ON 0xF3 enum axp288_adc_id { AXP288_ADC_TS, @@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } +static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, + unsigned long address) +{ + int ret; + + /* channels other than GPADC do not need to switch TS pin */ + if (address != AXP288_GP_ADC_H) + return 0; + + ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); + if (ret) + return ret; + + /* When switching to the GPADC pin give things some time to settle */ + if (mode == AXP288_ADC_TS_PIN_GPADC) + usleep_range(6000, 10000); + + return 0; +} + static int axp288_adc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + chan->address)) { + dev_err(&indio_dev->dev, "GPADC mode\n"); + ret = -EINVAL; + break; + } ret = axp288_adc_read_channel(val, chan->address, info->regmap); + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + chan->address)) + dev_err(&indio_dev->dev, "TS pin restore\n"); break; default: ret = -EINVAL; @@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } +static int axp288_adc_set_state(struct regmap *regmap) +{ + /* ADC should be always enabled for internal FG to function */ + if 
(regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) + return -EIO; + + return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); +} + static const struct iio_info axp288_adc_iio_info = { .read_raw = &axp288_adc_read_raw, .driver_module = THIS_MODULE, @@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. */ - ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + ret = axp288_adc_set_state(axp20x->regmap); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c index 81d4c39e414a4da6b0f8df0ebd8922d371a909d7..137f577d94326a299e0f944edfd108bdc2db31fe 100644 --- a/drivers/iio/adc/sun4i-gpadc-iio.c +++ b/drivers/iio/adc/sun4i-gpadc-iio.c @@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val, err: pm_runtime_put_autosuspend(indio_dev->dev.parent); + disable_irq(irq); mutex_unlock(&info->mutex); return ret; @@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id) complete(&info->completion); out: - disable_irq_nosync(info->temp_data_irq); return IRQ_HANDLED; } @@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id) complete(&info->completion); out: - disable_irq_nosync(info->fifo_data_irq); return IRQ_HANDLED; } diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 01fc76f7d6602c7b14c5f0cd4ea0c01090278087..c168e0db329ab49b6b59d720cc62cb2b37352088 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -77,7 +77,7 @@ #define VF610_ADC_ADSTS_MASK 0x300 #define VF610_ADC_ADLPC_EN 0x80 #define VF610_ADC_ADHSC_EN 0x400 -#define VF610_ADC_REFSEL_VALT 0x100 +#define VF610_ADC_REFSEL_VALT 0x800 #define VF610_ADC_REFSEL_VBG 0x1000 #define VF610_ADC_ADTRG_HARD 0x2000 #define VF610_ADC_AVGS_8 0x4000 diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 79c8c7cd70d5c6d74fc2e32cad372f8644233c6b..6e6a1ecc99ddf4b69252b6b4232c9cda6f00d4be 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -550,6 +550,31 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, } EXPORT_SYMBOL(st_sensors_read_info_raw); +static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, + const struct st_sensor_settings *sensor_settings) +{ + struct st_sensor_data *sdata = iio_priv(indio_dev); + struct device_node *np = sdata->dev->of_node; + struct st_sensors_platform_data *pdata; + + pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; + if (((np && of_property_read_bool(np, "spi-3wire")) || + (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { + int err; + + err = sdata->tf->write_byte(&sdata->tb, sdata->dev, + sensor_settings->sim.addr, + sensor_settings->sim.value); + if (err < 0) { + dev_err(&indio_dev->dev, + "failed to init interface mode\n"); + return err; + } + } + + return 0; +} + int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensor_settings *sensor_settings) @@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, return -ENODEV; } + err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); + if (err < 0) + return 
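The AXP288 change above brackets a GPADC raw read with two register writes: switch the TS pin to GPADC mode, wait for the mux to settle, read, then always switch back so the fuel gauge keeps working. A hedged sketch of that pattern (the register offsets are placeholders; the 0xf2/0xf3 mode values come from the hunk):

#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/printk.h>

#define ADC_TS_PIN_CTRL		0x84	/* placeholder offset */
#define ADC_TS_PIN_GPADC	0xf2
#define ADC_TS_PIN_ON		0xf3
#define ADC_GP_DATA		0x55	/* placeholder data register */

static int read_gpadc_raw(struct regmap *map, unsigned int *val)
{
	int ret;

	ret = regmap_write(map, ADC_TS_PIN_CTRL, ADC_TS_PIN_GPADC);
	if (ret)
		return ret;

	/* give the pin mux time to settle before sampling */
	usleep_range(6000, 10000);

	ret = regmap_read(map, ADC_GP_DATA, val);

	/* restore the TS pin even if the read failed */
	if (regmap_write(map, ADC_TS_PIN_CTRL, ADC_TS_PIN_ON))
		pr_warn("failed to restore TS pin mode\n");

	return ret;
}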
err; + if (sensor_settings[i].wai_addr) { err = sdata->tf->read_byte(&sdata->tb, sdata->dev, sensor_settings[i].wai_addr, &wai); diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index e7d4ea75e007c0bd82ef087812524258ec69f490..7599693f7fe9597cb750319ccb2635362c1dc9f0 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) struct tsl2563_chip *chip = iio_priv(dev_info); iio_push_event(dev_info, - IIO_UNMOD_EVENT_CODE(IIO_LIGHT, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index aa61ec15c1396ca3925ecf1a099fbf91a302dae7..f1bce05ffa135703792f24317db81375d3e4ebb3 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, - .multi_read_bit = true, + .multi_read_bit = false, .bootime = 2, }, }; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 01236cef7bfb1affe07e4214cf6d8baf6ca2a2a1..437522ca97b4b62fd79b8e84fa643ff9c4751ccd 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -61,6 +61,7 @@ struct addr_req { void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; + struct delayed_work work; int status; u32 seq; }; @@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr, } EXPORT_SYMBOL(rdma_translate_ip); -static void set_timeout(unsigned long time) +static void set_timeout(struct delayed_work *delayed_work, unsigned long time) { unsigned long delay; @@ -303,7 +304,7 @@ static void set_timeout(unsigned long time) if ((long)delay < 0) delay = 0; - mod_delayed_work(addr_wq, &work, delay); + mod_delayed_work(addr_wq, delayed_work, delay); } static void queue_req(struct addr_req *req) @@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req) list_add(&req->list, &temp_req->list); - if (req_list.next == &req->list) - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); mutex_unlock(&lock); } @@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in, return ret; } +static void process_one_req(struct work_struct *_work) +{ + struct addr_req *req; + struct sockaddr *src_in, *dst_in; + + mutex_lock(&lock); + req = container_of(_work, struct addr_req, work.work); + + if (req->status == -ENODATA) { + src_in = (struct sockaddr *)&req->src_addr; + dst_in = (struct sockaddr *)&req->dst_addr; + req->status = addr_resolve(src_in, dst_in, req->addr, + true, req->seq); + if (req->status && time_after_eq(jiffies, req->timeout)) { + req->status = -ETIMEDOUT; + } else if (req->status == -ENODATA) { + /* requeue the work for retrying again */ + set_timeout(&req->work, req->timeout); + mutex_unlock(&lock); + return; + } + } + list_del(&req->list); + mutex_unlock(&lock); + + req->callback(req->status, (struct sockaddr *)&req->src_addr, + req->addr, req->context); + put_client(req->client); + kfree(req); +} + static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; @@ -591,20 +622,23 @@ static void process_req(struct work_struct *work) true, req->seq); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; - else if (req->status == -ENODATA) + 
else if (req->status == -ENODATA) { + set_timeout(&req->work, req->timeout); continue; + } } list_move_tail(&req->list, &done_list); } - if (!list_empty(&req_list)) { - req = list_entry(req_list.next, struct addr_req, list); - set_timeout(req->timeout); - } mutex_unlock(&lock); list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); + /* It is safe to cancel other work items from this work item + * because at a time there can be only one work item running + * with this single threaded work queue. + */ + cancel_delayed_work(&req->work); req->callback(req->status, (struct sockaddr *) &req->src_addr, req->addr, req->context); put_client(req->client); @@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->context = context; req->client = client; atomic_inc(&client->refcount); + INIT_DELAYED_WORK(&req->work, process_one_req); req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); @@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) req->status = -ECANCELED; req->timeout = jiffies; list_move(&req->list, &req_list); - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); break; } } @@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event, if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; - if (neigh->nud_state & NUD_VALID) { - set_timeout(jiffies); - } + if (neigh->nud_state & NUD_VALID) + set_timeout(&work, jiffies); } return 0; } @@ -820,7 +854,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); + addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); if (!addr_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index a5dfab6adf495b88e846ee03b815536a7917bf48..221468f77128440acb2f5956d2ecf25bac7cc80d 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device) } up_read(&lists_rwsem); - mutex_unlock(&device_mutex); - ib_device_unregister_rdmacg(device); ib_device_unregister_sysfs(device); + + mutex_unlock(&device_mutex); + ib_cache_cleanup_one(device); ib_security_destroy_port_pkey_list(device); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2c98533a0203b084fb198a3eb8088a0bac59522c..c551d2b275fdf339310a087bef9c6e821d7c7e09 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_resize_cq cmd; - struct ib_uverbs_resize_cq_resp resp; + struct ib_uverbs_resize_cq_resp resp = {}; struct ib_udata udata; struct ib_cq *cq; int ret = -EINVAL; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3d2609608f589625d0077167fa2e66a00430b89f..5e530d2bee4448ddcb68724199a5c93d36eec32a 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + kobject_put(&file->device->kobj); kfree(file); } @@ -917,7 +918,6 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file 
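The addr.c rework above gives every request its own delayed_work and queues it on an ordered (single-threaded) workqueue: only one request is processed at a time, a request that is not finished simply requeues itself, and a completed request may cancel a peer's work without racing against it. A generic sketch of that per-request pattern, with illustrative names and deadlines rather than the rdma_addr ones:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *req_wq;

struct my_req {
	struct delayed_work work;
	unsigned long deadline;		/* absolute jiffies */
	int status;
};

static void process_one_req(struct work_struct *_work)
{
	struct my_req *req = container_of(_work, struct my_req, work.work);

	/* ... try to make progress here and update req->status ... */

	if (req->status == -EINPROGRESS &&
	    !time_after_eq(jiffies, req->deadline)) {
		/* not done yet: requeue ourselves for another attempt */
		mod_delayed_work(req_wq, &req->work, HZ / 10);
		return;
	}

	kfree(req);
}

static int submit_req(void)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->status = -EINPROGRESS;
	req->deadline = jiffies + 5 * HZ;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	queue_delayed_work(req_wq, &req->work, 0);
	return 0;
}

static int __init my_req_init(void)
{
	int ret;

	/* ordered: at most one work item runs, so items may cancel peers */
	req_wq = alloc_ordered_workqueue("my_reqs", WQ_MEM_RECLAIM);
	if (!req_wq)
		return -ENOMEM;

	ret = submit_req();
	if (ret)
		destroy_workqueue(req_wq);
	return ret;
}

static void __exit my_req_exit(void)
{
	/* real code must stop new submissions before destroying the queue */
	destroy_workqueue(req_wq);
}

module_init(my_req_init);
module_exit(my_req_exit);
MODULE_LICENSE("GPL");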
*file = filp->private_data; - struct ib_uverbs_device *dev = file->device; mutex_lock(&file->cleanup_mutex); if (file->ucontext) { @@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) ib_uverbs_release_async_event_file); kref_put(&file->ref, ib_uverbs_release_file); - kobject_put(&dev->kobj); return 0; } @@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, kref_get(&file->ref); mutex_unlock(&uverbs_dev->lists_mutex); - ib_uverbs_event_handler(&file->event_handler, &event); mutex_lock(&file->cleanup_mutex); ucontext = file->ucontext; @@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, * for example due to freeing the resources * (e.g mmput). */ + ib_uverbs_event_handler(&file->event_handler, &event); ib_dev->disassociate_ucontext(ucontext); mutex_lock(&file->cleanup_mutex); ib_uverbs_cleanup_ucontext(file, ucontext, true); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index fb98ed67d5bc684b8cc0b941d7140986b95aa99b..7f8fe443df46f5b562ac3b2561e19226e3ab6b68 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -895,7 +895,6 @@ static const struct { } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, - [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 5332f06b99ba4cfa3ef404ec816d09fb68ec0ddf..c2fba76becd4e985a35b9ed3d1ec1314c955706b 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, rhp = php->rhp; if (mr_type != IB_MR_TYPE_MEM_REG || - max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl && + max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl)) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index f78a733a63ec7e1f884930a72e53ba2100ee888e..d545302b8ef8c520d4eceb630a39db06b7a584a4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, } else { u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); - if (!dmac) + if (!dmac) { + kfree(ah); return ERR_PTR(-EINVAL); + } memcpy(ah->av.mac, dmac, ETH_ALEN); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 23fad6d969440bd2bd50a0c8b0dbafe8a92f4ae9..2540b65e242cebcf5b7c9fd60f936bc35bbf019b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) continue; free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (IS_ERR(free_mr->mr_free_qp[i])) { + if (!free_mr->mr_free_qp[i]) { dev_err(dev, "Create loop qp failed!\n"); goto create_lp_qp_failed; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 9ec1ae9a82c9843878ea10266b0d5338d8862308..a49ff2eb6fb3bca54f40bc460238eeb4a5b4ba52 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( u64 base = 0; u32 i, j; u32 k = 0; - u32 low; /* copy base values in obj_info */ - for 
(i = I40IW_HMC_IW_QP, j = 0; - i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + if ((i == I40IW_HMC_IW_SRQ) || + (i == I40IW_HMC_IW_FSIMC) || + (i == I40IW_HMC_IW_FSIAV)) { + info[i].base = 0; + info[i].cnt = 0; + continue; + } get_64bit_val(buf, j, &temp); info[i].base = RS_64_1(temp, 32) * 512; if (info[i].base > base) { base = info[i].base; k = i; } - low = (u32)(temp); - if (low) - info[i].cnt = low; + if (i == I40IW_HMC_IW_APBVT_ENTRY) { + info[i].cnt = 1; + continue; + } + if (i == I40IW_HMC_IW_QP) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + else if (i == I40IW_HMC_IW_CQ) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + else + info[i].cnt = (u32)(temp); } size = info[k].cnt * info[k].size + info[k].base; if (size & 0x1FFFFF) @@ -154,6 +166,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( return 0; } +/** + * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size + * @buf: ptr to fpm query buffer + * @buf_idx: index into buf + * @info: ptr to i40iw_hmc_obj_info struct + * @rsrc_idx: resource index into info + * + * Decode a 64 bit value from fpm query buffer into max count and size + */ +static u64 i40iw_sc_decode_fpm_query(u64 *buf, + u32 buf_idx, + struct i40iw_hmc_obj_info *obj_info, + u32 rsrc_idx) +{ + u64 temp; + u32 size; + + get_64bit_val(buf, buf_idx, &temp); + obj_info[rsrc_idx].max_cnt = (u32)temp; + size = (u32)RS_64_1(temp, 32); + obj_info[rsrc_idx].size = LS_64_1(1, size); + + return temp; +} + /** * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer * @buf: ptr to fpm query buffer @@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( struct i40iw_hmc_info *hmc_info, struct i40iw_hmc_fpm_misc *hmc_fpm_misc) { - u64 temp; struct i40iw_hmc_obj_info *obj_info; - u32 i, j, size; + u64 temp; + u32 size; u16 max_pe_sds; obj_info = hmc_info->hmc_obj; @@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( hmc_fpm_misc->max_sds = max_pe_sds; hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; - for (i = I40IW_HMC_IW_QP, j = 8; - i <= I40IW_HMC_IW_ARP; i++, j += 8) { - get_64bit_val(buf, j, &temp); - if (i == I40IW_HMC_IW_QP) - obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); - else if (i == I40IW_HMC_IW_CQ) - obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); - else - obj_info[i].max_cnt = (u32)temp; + get_64bit_val(buf, 8, &temp); + obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size); - size = (u32)RS_64_1(temp, 32); - obj_info[i].size = ((u64)1 << size); - } - for (i = I40IW_HMC_IW_MR, j = 48; - i <= I40IW_HMC_IW_PBLE; i++, j += 8) { - get_64bit_val(buf, j, &temp); - obj_info[i].max_cnt = (u32)temp; - size = (u32)RS_64_1(temp, 32); - obj_info[i].size = LS_64_1(1, size); - } + get_64bit_val(buf, 16, &temp); + obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size); + + i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE); + i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP); + + obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; + obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; + + i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR); + i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF); - 
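i40iw_sc_decode_fpm_query() above unpacks one 64-bit FPM query word into two fields: the low 32 bits are the maximum object count and the high 32 bits are a shift count giving the object size. A tiny sketch of that decoding with hypothetical names:

#include <linux/types.h>

struct obj_info {
	u32 max_cnt;
	u64 size;
};

/* Low half: max count.  High half: log2 of the object size in bytes. */
static void decode_fpm_word(u64 word, struct obj_info *info)
{
	info->max_cnt = (u32)word;
	info->size = 1ULL << (u32)(word >> 32);
}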
get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); - get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); - get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); get_64bit_val(buf, 64, &temp); + obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_XFFL].size = 4; hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); if (!hmc_fpm_misc->xf_block_size) return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1); + get_64bit_val(buf, 80, &temp); + obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_Q1FL].size = 4; hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); if (!hmc_fpm_misc->q1_block_size) return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER); + + get_64bit_val(buf, 112, &temp); + obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_PBLE].size = 8; + + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); + hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); + hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); + return 0; } @@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_ hmc_info->sd_table.sd_entry = virt_mem.va; } - /* fill size of objects which are fixed */ - hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4; - hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4; - hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8; - hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; - hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; - return ret_code; } @@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi) { u8 fcn_id = vsi->fcn_id; - if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID)) + if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT) vsi->dev->fcn_id_array[fcn_id] = false; i40iw_hw_stats_stop_timer(vsi); } diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index a39ac12b6a7e8430b80e42d9a38d7abd3b48d3b8..2ebaadbed379406e9d6c10f01fe0577ab515ebc9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -1507,8 +1507,8 @@ enum { I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), I40IW_SHADOWAREA_MASK = (128 - 1), - I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0, - I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0 + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1), + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1) }; enum i40iw_alignment { diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index 71050c5d29a05f3f6cb9433aafd7e09fbb5695aa..7f5583d83622a57821d1d4718c6bbc49d84410d0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, - I40IW_CQ0_ALIGNMENT_MASK); + I40IW_CQ0_ALIGNMENT); if (ret) return ret; diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h index 
91c421762f06797be844f0834d7ff09ff36bb5f9..f7013f11d8085966bf6d9dab310f0f8d0f35422a 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_status.h +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h @@ -62,7 +62,7 @@ enum i40iw_status_code { I40IW_ERR_INVALID_ALIGNMENT = -23, I40IW_ERR_FLUSHED_QUEUE = -24, I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, - I40IW_ERR_INVALID_IMM_DATA_SIZE = -26, + I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26, I40IW_ERR_TIMEOUT = -27, I40IW_ERR_OPCODE_MISMATCH = -28, I40IW_ERR_CQP_COMPL_ERROR = -29, diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c index b0d3a0e8a9b522a3549d63593a0af38f682e71ba..1060725d18bce852a108d6ce9e0b55cb460bfb0d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_uk.c +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c @@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, op_info = &info->op.inline_rdma_write; if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); if (ret_code) @@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, op_info = &info->op.inline_send; if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); if (ret_code) @@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); - info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM); + info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM); info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); @@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, u8 *wqe_size) { if (data_size > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; if (data_size <= 16) *wqe_size = I40IW_QP_WQE_MIN_SIZE; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index ae0746754008798fc0c4ab7e940f736c376a72f1..3d701c7a4c9140e488b7427d9d901a4ea77d2786 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler( if (qp->ibqp.qp_type != IB_QPT_RC) { av = *wqe; - if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) + if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) *wqe += sizeof(struct mlx5_av); else *wqe += sizeof(struct mlx5_base_av); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 69bda611d31385044766915410ac72eb70cffac5..90aa326fd7c0974e0c5a70eb5d2b46f50d03c490 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, struct pvrdma_dev *dev = to_vdev(ibcq->device); struct pvrdma_cq *cq = to_vcq(ibcq); u32 val = cq->cq_handle; + unsigned long flags; + int has_data = 0; val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; + spin_lock_irqsave(&cq->cq_lock, flags); + pvrdma_write_uar_cq(dev, val); - return 0; + if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { + unsigned int head; + + has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, + cq->ibcq.cqe, &head); + if (unlikely(has_data == PVRDMA_INVALID_IDX)) + dev_err(&dev->pdev->dev, "CQ ring state invalid\n"); + } + + spin_unlock_irqrestore(&cq->cq_lock, flags); + + return has_data; } /** diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ff50a7bd66d864506ec65aef1b63f45ce5d36e36..7ac25059c40f94aad951b28351cf425ebe573197 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -336,6 +336,7 @@ struct ipoib_dev_priv { unsigned long flags; struct rw_semaphore vlan_rwsem; + struct mutex mcast_mutex; struct rb_root path_tree; struct list_head path_list; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index f87d104837dcfab7f0e35b5b7fcae1e021599bfc..d69410c2ed97bdeceb17aedb2a7fe6049c59c310 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, case IB_CM_REQ_RECEIVED: return ipoib_cm_req_handler(cm_id, event); case IB_CM_DREQ_RECEIVED: - p = cm_id->context; ib_send_cm_drep(cm_id, NULL, 0); /* Fall through */ case IB_CM_REJ_RECEIVED: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7871379342f48fa77b2e6e8279ca774b4c49ad2f..184a22f4802773efc67131093f4ab4fcc89cd276 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = { IPOIB_NETDEV_STAT(tx_bytes), IPOIB_NETDEV_STAT(tx_errors), IPOIB_NETDEV_STAT(rx_dropped), - IPOIB_NETDEV_STAT(tx_dropped) + IPOIB_NETDEV_STAT(tx_dropped), + IPOIB_NETDEV_STAT(multicast), }; #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 57a9655e844deb1cc2eb57d9485f98e195368ac5..2e075377242e2baccc54cda5859d5b3ba7e768d0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; skb->dev = dev; if ((dev->features & NETIF_F_RXCSUM) && @@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev) return pending; } +static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, + struct ib_qp *qp, + enum ib_qp_state new_state) +{ + struct ib_qp_attr qp_attr; + struct ib_qp_init_attr query_init_attr; + int ret; + + ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); + if (ret) { + ipoib_warn(priv, "%s: Failed to query QP\n", __func__); + return; + } + /* print according to the new-state and the previous state.*/ + if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) + ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); + else + ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", + new_state, qp_attr.qp_state); +} + int ipoib_ib_dev_stop_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -728,7 
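The pvrdma change above makes ->req_notify_cq() honour IB_CQ_REPORT_MISSED_EVENTS: the CQ is armed under the CQ lock, and a positive return value tells the caller that completions may already be queued so it should poll again instead of sleeping. A hedged sketch of that contract, where ring_has_data() and arm_cq_hw() stand in for the driver's ring check and doorbell write:

#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

struct my_cq {
	struct ib_cq ibcq;
	spinlock_t lock;
};

static bool ring_has_data(struct my_cq *cq)
{
	return false;		/* placeholder for a real ring-index check */
}

static void arm_cq_hw(struct my_cq *cq, bool solicited_only)
{
	/* placeholder for the doorbell/UAR write */
}

static int my_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct my_cq *cq = container_of(ibcq, struct my_cq, ibcq);
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irqflags);

	arm_cq_hw(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);

	/* > 0: completions may be pending, caller should re-poll the CQ */
	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && ring_has_data(cq))
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, irqflags);
	return ret;
}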
+751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) */ qp_attr.qp_state = IB_QPS_ERR; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) - ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); + check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); /* Wait for all sends and receives to complete */ begin = jiffies; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4ce315c92b480fa705c30b33ffd7253b4cfded3b..6c77df34869dfb719d66787f6ccbb7637b042d36 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) int i, wait_flushed = 0; init_completion(&priv->ntbl.flushed); + set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); spin_lock_irqsave(&priv->lock, flags); @@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); init_completion(&priv->ntbl.deleted); - set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); /* Stop GC if called at init fail need to cancel work */ stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); @@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = { .ndo_tx_timeout = ipoib_timeout, .ndo_set_rx_mode = ipoib_set_mcast_list, .ndo_get_iflink = ipoib_get_iflink, + .ndo_get_stats64 = ipoib_get_stats, }; void ipoib_setup_common(struct net_device *dev) @@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev) priv->dev = dev; spin_lock_init(&priv->lock); init_rwsem(&priv->vlan_rwsem); + mutex_init(&priv->mcast_mutex); INIT_LIST_HEAD(&priv->path_list); INIT_LIST_HEAD(&priv->child_intfs); @@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format, priv->dev->dev_id = port - 1; result = ib_query_port(hca, port, &attr); - if (!result) - priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); - else { + if (result) { printk(KERN_WARNING "%s: ib_query_port %d failed\n", hca->name, port); goto device_init_failed; } + priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); + /* MTU will be reset when mcast join happens */ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; @@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format, printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", hca->name, port, result); goto device_init_failed; - } else - memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); + } + + memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, + sizeof(union ib_gid)); set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); result = ipoib_dev_init(priv->dev, hca, port); - if (result < 0) { + if (result) { printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", hca->name, port, result); goto device_init_failed; @@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void) ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); #ifdef CONFIG_INFINIBAND_IPOIB_CM ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); + ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); #endif /* diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 057f58e6afca249744f2d9013021e3c1c5d6417f..93e149efc1f5fc0382b61dcfc9f84d786d8b52ca 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -684,15 +684,10 @@ 
void ipoib_mcast_start_thread(struct net_device *dev) int ipoib_mcast_stop_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - unsigned long flags; ipoib_dbg_mcast(priv, "stopping multicast thread\n"); - spin_lock_irqsave(&priv->lock, flags); - cancel_delayed_work(&priv->mcast_task); - spin_unlock_irqrestore(&priv->lock, flags); - - flush_workqueue(priv->wq); + cancel_delayed_work_sync(&priv->mcast_task); return 0; } @@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list) { struct ipoib_mcast *mcast, *tmcast; + /* + * make sure the in-flight joins have finished before we attempt + * to leave + */ + list_for_each_entry_safe(mcast, tmcast, remove_list, list) + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + wait_for_completion(&mcast->done); + list_for_each_entry_safe(mcast, tmcast, remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); @@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev) struct ipoib_mcast *mcast, *tmcast; unsigned long flags; + mutex_lock(&priv->mcast_mutex); ipoib_dbg_mcast(priv, "flushing multicast list\n"); spin_lock_irqsave(&priv->lock, flags); @@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - ipoib_mcast_remove_list(&remove_list); + mutex_unlock(&priv->mcast_mutex); } static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) @@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) netif_addr_unlock(dev); local_irq_restore(flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - ipoib_mcast_remove_list(&remove_list); /* diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 3b616cb7c67f88512e82dbc8ba5f61e565cbd2a5..714cf7f9b13859988ca7df9f1fc98a10e0e6fbae 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1248,6 +1248,10 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0100", 0 }, { "ELAN0600", 0 }, { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, + { "ELAN0609", 0 }, + { "ELAN060B", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 922ea02edcc3ef6c091b572275245e7d5fabd3f0..20b5b21c1bba8892f37aab4a0190dfd6352b72d1 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -380,8 +380,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) return 0; if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { - psmouse_warn(psmouse, "failed to get extended button data\n"); - button_info = 0; + psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); + button_info = 0x33; } psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 688e77576e5a50b3f2f137eec72cd721d6bf96fa..354cbd6392cdf261ba657548ed2c208a09ddf50f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4452,6 +4452,7 @@ 
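The IPoIB multicast changes above move the wait for in-flight joins into ipoib_mcast_remove_list() and serialize flushes with the new mcast_mutex, so every path waits for busy entries before leaving and freeing them. The underlying pattern is: flag an entry busy while an async operation runs, complete() when it finishes, and wait_for_completion() before teardown. A generic sketch with illustrative types:

#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/slab.h>

#define ENTRY_BUSY	0	/* bit number in entry->flags */

struct entry {
	struct list_head list;
	unsigned long flags;
	struct completion done;
};

/* Called from the async path when the operation on @e finishes. */
static void entry_op_done(struct entry *e)
{
	clear_bit(ENTRY_BUSY, &e->flags);
	complete(&e->done);
}

/* Tear down a private list: wait for in-flight work before freeing. */
static void remove_list(struct list_head *remove_list)
{
	struct entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, remove_list, list)
		if (test_bit(ENTRY_BUSY, &e->flags))
			wait_for_completion(&e->done);

	list_for_each_entry_safe(e, tmp, remove_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}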
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) /* Setting */ irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); irte->hi.fields.vector = vcpu_pi_info->vector; + irte->lo.fields_vapic.ga_log_intr = 1; irte->lo.fields_vapic.guest_mode = 1; irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5cc597b383c7208d69e83d8ec0646e936817d1b4..372303700566f4f984e6656bd937e2d99bf07cc6 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2440,11 +2440,11 @@ static int __init state_next(void) break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); - register_syscore_ops(&amd_iommu_syscore_ops); x86_platform.iommu_shutdown = disable_iommus; init_state = IOMMU_ENABLED; break; case IOMMU_ENABLED: + register_syscore_ops(&amd_iommu_syscore_ops); ret = amd_iommu_init_pci(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; enable_iommus_v2(); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index bc89b4d6c043dacee88463ba22edc8883f60385e..2d80fa8a0634aba34b366609d8bcc50f432bb31c 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -400,6 +400,8 @@ struct arm_smmu_device { u32 cavium_id_base; /* Specific to Cavium */ + spinlock_t global_sync_lock; + /* IOMMU core code handle */ struct iommu_device iommu; }; @@ -436,7 +438,7 @@ struct arm_smmu_domain { struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; struct mutex init_mutex; /* Protects smmu pointer */ - spinlock_t cb_lock; /* Serialises ATS1* ops */ + spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ struct iommu_domain domain; }; @@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) { void __iomem *base = ARM_SMMU_GR0(smmu); + unsigned long flags; + spin_lock_irqsave(&smmu->global_sync_lock, flags); __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, base + ARM_SMMU_GR0_sTLBGSTATUS); + spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } static void arm_smmu_tlb_sync_context(void *cookie) @@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie) struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); + unsigned long flags; + spin_lock_irqsave(&smmu_domain->cb_lock, flags); __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, base + ARM_SMMU_CB_TLBSTATUS); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); } static void arm_smmu_tlb_sync_vmid(void *cookie) @@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev) if (using_legacy_binding) { ret = arm_smmu_register_legacy_master(dev, &smmu); + + /* + * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master() + * will allocate/initialise a new one. Thus we need to update fwspec for + * later use. 
+ */ fwspec = dev->iommu_fwspec; if (ret) goto out_free; @@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev) ret = arm_smmu_master_alloc_smes(dev); if (ret) - goto out_free; + goto out_cfg_free; iommu_device_link(&smmu->iommu, dev); return 0; +out_cfg_free: + kfree(cfg); out_free: - if (fwspec) - kfree(fwspec->iommu_priv); iommu_fwspec_free(dev); return ret; } @@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->num_mapping_groups = size; mutex_init(&smmu->stream_map_mutex); + spin_lock_init(&smmu->global_sync_lock); if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index af330f513653d2849682b2d4536dd86dcb6a43d7..d665d0dc16e8f787813a6106d15bd83afacc4f34 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(prot & (IOMMU_READ | IOMMU_WRITE))) return 0; + if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) + return -ERANGE; + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); /* * Synchronise all PTE updates for the new mapping before there's @@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); size_t unmapped; + if (WARN_ON(upper_32_bits(iova))) + return 0; + unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); if (unmapped) io_pgtable_tlb_sync(&data->iop); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b182039862c50debf8c55df4aad825cee468eed6..e8018a308868e33a4ea722c0b9686118078dc0db 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) return 0; + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || + paddr >= (1ULL << data->iop.cfg.oas))) + return -ERANGE; + prot = arm_lpae_prot_to_pte(data, iommu_prot); ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); /* @@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, arm_lpae_iopte *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) + return 0; + unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); if (unmapped) io_pgtable_tlb_sync(&data->iop); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 524263a7ae6f54c290d9fc017633e0ac074054df..a3e667077b14e12ccca5df742922fe47d4e64005 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops); * @fmt: The page table format. * @cookie: An opaque token provided by the IOMMU driver and passed back to * any callback routines. - * @tlb_sync_pending: Private flag for optimising out redundant syncs. * @cfg: A copy of the page table configuration. * @ops: The page table operations in use for this set of page tables. 
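The io-pgtable hunks above reject map requests whose IOVA or physical address cannot be represented by the page-table format (32 bits for the v7s short-descriptor format, cfg.ias/cfg.oas bits for LPAE), returning -ERANGE instead of silently truncating, and make unmap of an out-of-range IOVA a no-op. A small sketch of the map-side bounds check:

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/errno.h>

/* Reject addresses the page-table format cannot represent (ias/oas < 64). */
static int check_map_range(unsigned long iova, phys_addr_t paddr,
			   unsigned int ias, unsigned int oas)
{
	if (WARN_ON(iova >= (1ULL << ias) || paddr >= (1ULL << oas)))
		return -ERANGE;

	return 0;
}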
*/ struct io_pgtable { enum io_pgtable_fmt fmt; void *cookie; - bool tlb_sync_pending; struct io_pgtable_cfg cfg; struct io_pgtable_ops ops; }; @@ -175,22 +173,17 @@ struct io_pgtable { static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { iop->cfg.tlb->tlb_flush_all(iop->cookie); - iop->tlb_sync_pending = true; } static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, unsigned long iova, size_t size, size_t granule, bool leaf) { iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); - iop->tlb_sync_pending = true; } static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) { - if (iop->tlb_sync_pending) { - iop->cfg.tlb->tlb_sync(iop->cookie); - iop->tlb_sync_pending = false; - } + iop->cfg.tlb->tlb_sync(iop->cookie); } /** diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5d14cd15198db5cb6361d060abf208260c086ebe..91c6d367ab3593b99a6c9cfb9be01855e8372af2 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); + data->tlb_flush_active = true; } static void mtk_iommu_tlb_sync(void *cookie) @@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie) int ret; u32 tmp; + /* Avoid timing out if there's nothing to wait for */ + if (!data->tlb_flush_active) + return; + ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, tmp != 0, 10, 100000); if (ret) { @@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie) } /* Clear the CPE status */ writel_relaxed(0, data->base + REG_MMU_CPE_DONE); + data->tlb_flush_active = false; } static const struct iommu_gather_ops mtk_iommu_gather_ops = { diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 2a28eadeea0ec3cf2ad363f7076f8555245520a2..c06cc91b5d9a1e63ec0984927b6c7722b5242f82 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -47,6 +47,7 @@ struct mtk_iommu_data { struct iommu_group *m4u_group; struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ bool enable_4GB; + bool tlb_flush_active; struct iommu_device iommu; }; diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 28b26c80f4cf937b8547328b5d724a69e51b7d05..072bd227b6c677b40fee80f99e1364d733e681a3 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) #define AT91_RTC_IMR 0x28 #define AT91_RTC_IRQ_MASK 0x1f -void __init aic_common_rtc_irq_fixup(struct device_node *root) +void __init aic_common_rtc_irq_fixup(void) { struct device_node *np; void __iomem *regs; - np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc"); + np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc"); if (!np) - np = of_find_compatible_node(root, NULL, + np = of_find_compatible_node(NULL, NULL, "atmel,at91sam9x5-rtc"); if (!np) @@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root) #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ -void __init aic_common_rtt_irq_fixup(struct device_node *root) +void __init aic_common_rtt_irq_fixup(void) { struct device_node *np; void 
__iomem *regs; @@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches) return; match = of_match_node(matches, root); - of_node_put(root); if (match) { - void (*fixup)(struct device_node *) = match->data; - fixup(root); + void (*fixup)(void) = match->data; + fixup(); } of_node_put(root); diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h index af60376d50debe30132acd00c58254c4d1a7ab9a..242e62c1851ead9744442e0236fa84198f745d7f 100644 --- a/drivers/irqchip/irq-atmel-aic-common.h +++ b/drivers/irqchip/irq-atmel-aic-common.h @@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, const char *name, int nirqs, const struct of_device_id *matches); -void __init aic_common_rtc_irq_fixup(struct device_node *root); +void __init aic_common_rtc_irq_fixup(void); -void __init aic_common_rtt_irq_fixup(struct device_node *root); +void __init aic_common_rtt_irq_fixup(void); #endif /* __IRQ_ATMEL_AIC_COMMON_H */ diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 37f952dd9fc94bdc5faa6c2721822b8780dd46e1..bb1ad451392fd8b7a315a6b2c5bea523af04a62a 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = { .xlate = aic_irq_domain_xlate, }; -static void __init at91rm9200_aic_irq_fixup(struct device_node *root) +static void __init at91rm9200_aic_irq_fixup(void) { - aic_common_rtc_irq_fixup(root); + aic_common_rtc_irq_fixup(); } -static void __init at91sam9260_aic_irq_fixup(struct device_node *root) +static void __init at91sam9260_aic_irq_fixup(void) { - aic_common_rtt_irq_fixup(root); + aic_common_rtt_irq_fixup(); } -static void __init at91sam9g45_aic_irq_fixup(struct device_node *root) +static void __init at91sam9g45_aic_irq_fixup(void) { - aic_common_rtc_irq_fixup(root); - aic_common_rtt_irq_fixup(root); + aic_common_rtc_irq_fixup(); + aic_common_rtt_irq_fixup(); } static const struct of_device_id aic_irq_fixups[] __initconst = { diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index c04ee9a23d094f9d6a9e0c95a7451cf73626727a..6acad2ea0fb3565a2604ecbb3fb5235395d62fad 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = { .xlate = aic5_irq_domain_xlate, }; -static void __init sama5d3_aic_irq_fixup(struct device_node *root) +static void __init sama5d3_aic_irq_fixup(void) { - aic_common_rtc_irq_fixup(root); + aic_common_rtc_irq_fixup(); } static const struct of_device_id aic5_irq_fixups[] __initconst = { diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index bddf169c4b37b7c9d2a91518aa233480dbb52f5d..b009b916a2923504414aafe6961b404614770da5 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, ct->chip.irq_suspend = brcmstb_l2_intc_suspend; ct->chip.irq_resume = brcmstb_l2_intc_resume; + ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend; if (data->can_wake) { /* This IRQ chip can wake the system, set all child interrupts diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 249240d9a4259eb72b7afb68c2e2723a72e7c9c5..833a90fe33aed839a81b781831027e677eef5581 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ 
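The AIC fixup changes above stop passing the root node into of_find_compatible_node() (its first argument is only a search cursor, so NULL means "search the whole tree") and drop the extra of_node_put(root) that released the root reference twice. A short sketch of the balanced lookup pattern, with a placeholder compatible string:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __init rtc_irq_fixup(void)
{
	struct device_node *np;
	void __iomem *regs;

	/* NULL cursor: start from the beginning of the device tree */
	np = of_find_compatible_node(NULL, NULL, "vendor,some-rtc");
	if (!np)
		return;

	regs = of_iomap(np, 0);
	if (regs) {
		/* ... mask the device's interrupts here ... */
		iounmap(regs);
	}

	/* release exactly the reference of_find_compatible_node() took */
	of_node_put(np);
}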
b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, *dev_id = args.args[0]; break; } + index++; } while (!ret); return ret; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 68932873eebc0c3d14caa00b048d0c961e765a98..284738add89b3b94f77ad257fd314bc89a423cae 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node) #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) -#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531) +#ifdef CONFIG_ACPI_NUMA struct its_srat_map { /* numa node id */ u32 numa_node; @@ -1843,7 +1843,7 @@ struct its_srat_map { u32 its_id; }; -static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata; +static struct its_srat_map *its_srat_maps __initdata; static int its_in_srat __initdata; static int __init acpi_get_its_numa_node(u32 its_id) @@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id) return NUMA_NO_NODE; } +static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, + const unsigned long end) +{ + return 0; +} + static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, const unsigned long end) { @@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, return -EINVAL; } - if (its_in_srat >= MAX_NUMNODES) { - pr_err("SRAT: ITS affinity exceeding max count[%d]\n", - MAX_NUMNODES); - return -EINVAL; - } - node = acpi_map_pxm_to_node(its_affinity->proximity_domain); if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { @@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, static void __init acpi_table_parse_srat_its(void) { + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc(count * sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) { + pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); + return; + } + acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, gic_acpi_parse_srat_its, 0); } + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} #else static void __init acpi_table_parse_srat_its(void) { } static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } #endif static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, @@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void) acpi_table_parse_srat_its(); acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); } #else static void __init its_acpi_probe(void) { } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index dbffb7ab62033b346ca7fc3b3468d6bcb3a19a63..984c3ecfd22c21aff59b3cad177c676348d86dd4 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs if (static_key_true(&supports_deactivate)) gic_write_eoir(irqnr); + else + isb(); err = handle_domain_irq(gic_data.domain, irqnr, regs); if (err) { @@ -640,11 +642,16 
@@ static void gic_smp_init(void) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; void __iomem *reg; int enabled; u64 val; + if (force) + cpu = cpumask_first(mask_val); + else + cpu = cpumask_any_and(mask_val, cpu_online_mask); + if (cpu >= nr_cpu_ids) return -EINVAL; @@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; - for (i = 0; i < nr_irqs; i++) - gic_irq_domain_map(domain, virq + i, hwirq + i); + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } return 0; } diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1b1df4f770bdefe0a16e0109624801f8af90f1f7..d3e7c43718b82b9e7fb2191dedadab1950c3ac7d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) if (likely(irqnr > 15 && irqnr < 1020)) { if (static_key_true(&supports_deactivate)) writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); + isb(); handle_domain_irq(gic->domain, irqnr, regs); continue; } @@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) goto out; cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); - if (unlikely(gic_irq < 32 || gic_irq > 1020)) + if (unlikely(gic_irq < 32 || gic_irq > 1020)) { handle_bad_irq(desc); - else + } else { + isb(); generic_handle_irq(cascade_irq); + } out: chained_irq_exit(chip, desc); @@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; - for (i = 0; i < nr_irqs; i++) - gic_irq_domain_map(domain, virq + i, hwirq + i); + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } return 0; } diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 7b5fd8fb1761d1912be615ee14571e420dc4c92b..aaca0b3d662eb18bda0848652c309985d5f6ad0e 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c @@ -44,7 +44,6 @@ struct procdata { char log_name[15]; /* log filename */ struct log_data *log_head, *log_tail; /* head and tail for queue */ int if_used; /* open count for interface */ - int volatile del_lock; /* lock for delete operations */ unsigned char logtmp[LOG_MAX_LINELEN]; wait_queue_head_t rd_queue; }; @@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp) { struct log_data *ib; struct procdata *pd = card->proclog; - int i; unsigned long flags; if (!pd) @@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp) else pd->log_tail->next = ib; /* follows existing messages */ pd->log_tail = ib; /* new tail */ - i = pd->del_lock++; /* get lock state */ - spin_unlock_irqrestore(&card->hysdn_lock, flags); /* delete old entrys */ - if (!i) - while (pd->log_head->next) { - if ((pd->log_head->usage_cnt <= 0) && - (pd->log_head->next->usage_cnt <= 0)) { - ib = pd->log_head; - pd->log_head = pd->log_head->next; - kfree(ib); - } else - break; - } /* pd->log_head->next */ - pd->del_lock--; /* release lock level */ + while (pd->log_head->next) { + if ((pd->log_head->usage_cnt <= 0) && + (pd->log_head->next->usage_cnt <= 0)) { + ib = pd->log_head; + pd->log_head = pd->log_head->next; + kfree(ib); + } else { + break; + } + } /* pd->log_head->next */ + + spin_unlock_irqrestore(&card->hysdn_lock, flags); + 
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ } /* put_log_buffer */ diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 89b09c51ab7cff64d20e24ff43fff8bd720d1ef3..38a5bb764c7b55cb8b742639e49756e413b4ab26 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; + bname[sizeof(bname)-1] = 0; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index c151c6daa67ee8aa2ae600b4462f146d85776335..f63a110b7bcb2d2257869484bd9b894d94b9b5d2 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) char newname[10]; if (p) { - /* Slave-Name MUST not be empty */ - if (!strlen(p + 1)) + /* Slave-Name MUST not be empty or overflow 'newname' */ + if (strscpy(newname, p + 1, sizeof(newname)) <= 0) return NULL; - strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c index 78fc5d5e90514353b258658da350a85c408b89db..92e6570b11435e7e4dade7aa433231008c82b294 100644 --- a/drivers/isdn/mISDN/fsm.c +++ b/drivers/isdn/mISDN/fsm.c @@ -26,7 +26,7 @@ #define FSM_TIMER_DEBUG 0 -void +int mISDN_FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount) { @@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm, fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL); + if (fsm->jumpmatrix == NULL) + return -ENOMEM; for (i = 0; i < fncount; i++) if ((fnlist[i].state >= fsm->state_count) || @@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm, } else fsm->jumpmatrix[fsm->state_count * fnlist[i].event + fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; + return 0; } EXPORT_SYMBOL(mISDN_FsmNew); diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h index 928f5be192c1fd4a5c8197160efb9dad21e1141d..e1def84902212e1637cf96f0c9cefb8c3950edbd 100644 --- a/drivers/isdn/mISDN/fsm.h +++ b/drivers/isdn/mISDN/fsm.h @@ -55,7 +55,7 @@ struct FsmTimer { void *arg; }; -extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); +extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); extern void mISDN_FsmFree(struct Fsm *); extern int mISDN_FsmEvent(struct FsmInst *, int , void *); extern void mISDN_FsmChangeState(struct FsmInst *, int); diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index bebc57b72138e721aa5a290482ae46e748aac2d2..3192b0eb39445435d7b38c0f914bded7c93b2fac 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c @@ -414,8 +414,7 @@ l1_init(u_int *deb) l1fsm_s.event_count = L1_EVENT_COUNT; l1fsm_s.strEvent = strL1Event; l1fsm_s.strState = strL1SState; - mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); - return 0; + return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); } void diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 7243a6746f8b099d79d3221def6dcfcf138a6f51..9ff0903a0e89fb2f4f7373e1ef4d1d1663934b36 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = { int Isdnl2_Init(u_int *deb) { + int res; debug = deb; mISDN_register_Bprotocol(&X75SLP); l2fsm.state_count = L2_STATE_COUNT; l2fsm.event_count = L2_EVENT_COUNT; l2fsm.strEvent = 
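The isdn_net_newslave() fix above replaces an unbounded strcpy() into a 10-byte buffer with strscpy(), whose return value (characters copied, or -E2BIG if the source would not fit) covers both the "empty name" and the "name too long" cases in one check. A minimal sketch:

#include <linux/string.h>
#include <linux/errno.h>

static int set_slave_name(char *dst, size_t dst_size, const char *src)
{
	/* <= 0 rejects both an empty source and -E2BIG (truncation) */
	if (strscpy(dst, src, dst_size) <= 0)
		return -EINVAL;

	return 0;
}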
strL2Event; l2fsm.strState = strL2State; - mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); - TEIInit(deb); + res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); + if (res) + goto error; + res = TEIInit(deb); + if (res) + goto error_fsm; return 0; + +error_fsm: + mISDN_FsmFree(&l2fsm); +error: + mISDN_unregister_Bprotocol(&X75SLP); + return res; } void diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 908127efccf8ceb94ef2ab1ed7df4b17b26ce5b9..12d9e5f4beb1f81c5aa5e5af81bc9aca61c21668 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev) int TEIInit(u_int *deb) { + int res; debug = deb; teifsmu.state_count = TEI_STATE_COUNT; teifsmu.event_count = TEI_EVENT_COUNT; teifsmu.strEvent = strTeiEvent; teifsmu.strState = strTeiState; - mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); + res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); + if (res) + goto error; teifsmn.state_count = TEI_STATE_COUNT; teifsmn.event_count = TEI_EVENT_COUNT; teifsmn.strEvent = strTeiEvent; teifsmn.strState = strTeiState; - mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); + res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); + if (res) + goto error_smn; deactfsm.state_count = DEACT_STATE_COUNT; deactfsm.event_count = DEACT_EVENT_COUNT; deactfsm.strEvent = strDeactEvent; deactfsm.strState = strDeactState; - mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); + res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); + if (res) + goto error_deact; return 0; + +error_deact: + mISDN_FsmFree(&teifsmn); +error_smn: + mISDN_FsmFree(&teifsmu); +error: + return res; } void TEIFree(void) diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 5ecc154f6831e8f17341495ccfde286757960b29..9bc32578a766e1581d315dc23337214d75fd38d3 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -657,7 +657,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, * be directed to disk. */ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, - struct ppa_addr ppa, int bio_iter) + struct ppa_addr ppa, int bio_iter, bool advanced_bio) { struct pblk *pblk = container_of(rb, struct pblk, rwb); struct pblk_rb_entry *entry; @@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, * filled with data from the cache). 
If part of the data resides on the * media, we will read later on */ - if (unlikely(!bio->bi_iter.bi_idx)) + if (unlikely(!advanced_bio)) bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); data = bio_data(bio); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 4e5c48f3de628d64c806bb21f14c4bd7a473d9b3..d682e89e649351f5811232427a7aeab1009706f0 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -26,7 +26,7 @@ */ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, sector_t lba, struct ppa_addr ppa, - int bio_iter) + int bio_iter, bool advanced_bio) { #ifdef CONFIG_NVM_DEBUG /* Callers must ensure that the ppa points to a cache address */ @@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, BUG_ON(!pblk_addr_in_cache(ppa)); #endif - return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); + return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, + bio_iter, advanced_bio); } static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, @@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; sector_t blba = pblk_get_lba(bio); int nr_secs = rqd->nr_ppas; - int advanced_bio = 0; + bool advanced_bio = false; int i, j = 0; /* logic error: lba out-of-bounds. Ignore read request */ @@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, retry: if (pblk_ppa_empty(p)) { WARN_ON(test_and_set_bit(i, read_bitmap)); - continue; + + if (unlikely(!advanced_bio)) { + bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE); + advanced_bio = true; + } + + goto next; } /* Try to read from write buffer. The address is later checked * on the write buffer to prevent retrieving overwritten data. */ if (pblk_addr_in_cache(p)) { - if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { + if (!pblk_read_from_cache(pblk, bio, lba, p, i, + advanced_bio)) { pblk_lookup_l2p_seq(pblk, &p, lba, 1); goto retry; } WARN_ON(test_and_set_bit(i, read_bitmap)); - advanced_bio = 1; + advanced_bio = true; #ifdef CONFIG_NVM_DEBUG atomic_long_inc(&pblk->cache_reads); #endif @@ -83,6 +91,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, rqd->ppa_list[j++] = p; } +next: if (advanced_bio) bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); } @@ -282,7 +291,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, * write buffer to prevent retrieving overwritten data. 
*/ if (pblk_addr_in_cache(ppa)) { - if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { + if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) { pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); goto retry; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 0c5692cc2f605861da62a93de7a61aafb3f09f75..67e623bd5c2df1f69fecd084def0d793da7eab7c 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio, struct list_head *list, unsigned int max); int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, - struct ppa_addr ppa, int bio_iter); + struct ppa_addr ppa, int bio_iter, bool advanced_bio); unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index ac91fd0d62c6e4e22c57ab354ceb56adfdaee48e..cbca5e51b9759c57813b8579a2d38318bab39e02 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {}; */ static struct mbox_chan *get_pcc_channel(int id) { - if (id < 0 || id > pcc_mbox_ctrl.num_chans) + if (id < 0 || id >= pcc_mbox_ctrl.num_chans) return ERR_PTR(-ENOENT); return &pcc_mbox_channels[id]; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 850ff6c6799449541cf8c0357cf7efa17d9e8c60..44f4a8ac95bd5a3a0f7742291c4827a27e27b3b5 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); */ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) { - blk_status_t a; - int f; + int a, f; unsigned long buffers_processed = 0; struct dm_buffer *b, *tmp; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 1b224aa9cf15213ac22bafe6f27d2206d37ff1b5..3acce09bba35c54b1afe4e8af962766bfd90eb73 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1587,16 +1587,18 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (likely(ic->mode == 'J')) { if (dio->write) { unsigned next_entry, i, pos; - unsigned ws, we; + unsigned ws, we, range_sectors; - dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); + dio->range.n_sectors = min(dio->range.n_sectors, + ic->free_sectors << ic->sb->log2_sectors_per_block); if (unlikely(!dio->range.n_sectors)) goto sleep; - ic->free_sectors -= dio->range.n_sectors; + range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; + ic->free_sectors -= range_sectors; journal_section = ic->free_section; journal_entry = ic->free_section_entry; - next_entry = ic->free_section_entry + dio->range.n_sectors; + next_entry = ic->free_section_entry + range_sectors; ic->free_section_entry = next_entry % ic->journal_section_entries; ic->free_section += next_entry / ic->journal_section_entries; ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; @@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic) wraparound_section(ic, &ic->free_section); ic->n_uncommitted_sections++; } + WARN_ON(ic->journal_sections * ic->journal_section_entries != + (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors); } static void integrity_commit(struct work_struct *w) @@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, { unsigned i, j, n; 
struct journal_completion comp; + struct blk_plug plug; + + blk_start_plug(&plug); comp.ic = ic; comp.in_flight = (atomic_t)ATOMIC_INIT(1); @@ -1945,6 +1952,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, dm_bufio_write_dirty_buffers_async(ic->bufio); + blk_finish_plug(&plug); + complete_journal_op(&comp); wait_for_completion_io(&comp.comp); @@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Block size doesn't match the information in superblock"; goto bad; } + if (!le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; + ti->error = "Corrupted superblock, journal_sections is 0"; + goto bad; + } /* make sure that ti->max_io_len doesn't overflow */ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2e10c2f13a34986c9d8d445e1b00ea94261b2628..5bfe285ea9d1c815ae8014064a29e4a08566d374 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -208,6 +208,7 @@ struct raid_dev { #define RT_FLAG_RS_BITMAP_LOADED 2 #define RT_FLAG_UPDATE_SBS 3 #define RT_FLAG_RESHAPE_RS 4 +#define RT_FLAG_RS_SUSPENDED 5 /* Array elements of 64 bit needed for rebuild/failed disk bits */ #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) @@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout) if (__raid10_near_copies(layout) > 1) return "near"; - WARN_ON(__raid10_far_copies(layout) < 2); + if (__raid10_far_copies(layout) > 1) + return "far"; - return "far"; + return "unknown"; } /* Return md raid10 algorithm for @name */ @@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (!freshest) return 0; - if (validate_raid_redundancy(rs)) { - rs->ti->error = "Insufficient redundancy to activate array"; - return -EINVAL; - } - /* * Validation of the freshest device provides the source of * validation for the remaining devices. 
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (super_validate(rs, freshest)) return -EINVAL; + if (validate_raid_redundancy(rs)) { + rs->ti->error = "Insufficient redundancy to activate array"; + return -EINVAL; + } + rdev_for_each(rdev, mddev) if (!test_bit(Journal, &rdev->flags) && rdev != freshest && @@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) } mddev_suspend(&rs->md); + set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ if (rs_is_raid456(rs)) { @@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti) { struct raid_set *rs = ti->private; - if (!rs->md.suspended) + if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_suspend(&rs->md); rs->md.ro = 1; @@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs) return r; /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ - if (mddev->suspended) + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_resume(mddev); /* @@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs) } /* Suspend because a resume will happen in raid_resume() */ - if (!mddev->suspended) - mddev_suspend(mddev); + set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); + mddev_suspend(mddev); /* * Now reshape got set up, update superblocks to @@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti) if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (mddev->suspended) + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_resume(mddev); } static struct target_type raid_target = { .name = "raid", - .version = {1, 11, 1}, + .version = {1, 12, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index a39bcd9b982a443cdfdac2013fcbf5ce3561ac4e..28a4071cdf85e8ae15e8741d8770199a689de6b2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -20,6 +20,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "table" @@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) return false; } +static int device_dax_write_cache_enabled(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct dax_device *dax_dev = dev->dax_dev; + + if (!dax_dev) + return false; + + if (dax_write_cache_enabled(dax_dev)) + return true; + return false; +} + +static int dm_table_supports_dax_write_cache(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, + device_dax_write_cache_enabled, NULL)) + return true; + } + + return false; +} + static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); + if (dm_table_supports_dax_write_cache(t)) + dax_write_cache(t->md->dax_dev, true); + /* Ensure that all underlying devices are non-rotational. 
*/ if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 504ba3fa328b2ebb0d3abf40097d297eb08d9de4..e13f90832b6b54256f88d45ba30cee16660f58ef 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) { unsigned n; - if (!fio->rs) { - fio->rs = mempool_alloc(v->fec->rs_pool, 0); - if (unlikely(!fio->rs)) { - DMERR("failed to allocate RS"); - return -ENOMEM; - } - } + if (!fio->rs) + fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO); fec_for_each_prealloc_buffer(n) { if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); + fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT); if (unlikely(!fio->bufs[n])) { DMERR("failed to allocate FEC buffer"); return -ENOMEM; @@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); + fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT); /* we can manage with even one buffer if necessary */ if (unlikely(!fio->bufs[n])) break; } fio->nbufs = n; - if (!fio->output) { + if (!fio->output) fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); - if (!fio->output) { - DMERR("failed to allocate FEC page"); - return -ENOMEM; - } - } - return 0; } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 884ff7c170a0450a308d5405ada1c72de9b76a58..a4fa2ada688365da1f8f3f896bc3475eccc2c9d6 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); if (ret == 0) - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); return ret; } @@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, /* Flush drive cache (this will also sync data) */ if (ret == 0) - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); return ret; } @@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); goto out; } @@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set) (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); } - page = alloc_page(GFP_KERNEL); + page = alloc_page(GFP_NOIO); if (!page) return -ENOMEM; @@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) /* Get zone information from disk */ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), - &blkz, &nr_blkz, GFP_KERNEL); + &blkz, &nr_blkz, GFP_NOIO); if (ret) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); @@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) ret = blkdev_reset_zones(dev->bdev, dmz_start_sect(zmd, zone), - dev->zone_nr_sectors, GFP_KERNEL); + dev->zone_nr_sectors, GFP_NOIO); if (ret) { dmz_dev_err(dev, "Reset zone %u failed %d", dmz_id(zmd, zone), 
ret); diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 05c0a126f5c82c6dff4b0d924980654456cacf35..44a119e12f1abd8eb5b4e4ae15689c83a9502536 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, nr_blocks = block - wp_block; ret = blkdev_issue_zeroout(zrc->dev->bdev, dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), - dmz_blk2sect(nr_blocks), GFP_NOFS, false); + dmz_blk2sect(nr_blocks), GFP_NOIO, 0); if (ret) { dmz_dev_err(zrc->dev, "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 2b538fa817f43e6fbc851be50aeb17133f35f3e4..b08bbbd4d9027d8c4a7d07fd5e4786b25462f740 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) int ret; /* Create a new chunk work */ - cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS); + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); if (!cw) goto out; @@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bio->bi_bdev = dev->bdev; - if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE)) + if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) return DM_MAPIO_REMAPPED; /* The BIO should be block aligned */ @@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bioctx->status = BLK_STS_OK; /* Set the BIO pending in the flush list */ - if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) { + if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { spin_lock(&dmz->flush_lock); bio_list_add(&dmz->flush_list, bio); spin_unlock(&dmz->flush_lock); @@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 8cdca029674975f2d365ef6d17aabbc684c33b8e..b01e458d31e94ce9eba98b9300366121de550619 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev) static bool set_in_sync(struct mddev *mddev) { - WARN_ON_ONCE(!spin_is_locked(&mddev->lock)); + WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock)); if (!mddev->in_sync) { mddev->sync_checkers++; spin_unlock(&mddev->lock); @@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (mddev->safemode == 1) mddev->safemode = 0; /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ - if (mddev->in_sync || !mddev->sync_checkers) { + if (mddev->in_sync || mddev->sync_checkers) { spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; @@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev) if (mddev_trylock(mddev)) { int spares = 0; + if (!mddev->external && mddev->safemode == 1) + mddev->safemode = 0; + if (mddev->ro) { struct md_rdev *rdev; if (!mddev->external && mddev->in_sync) diff --git a/drivers/md/md.h b/drivers/md/md.h index b50eb4ac1b8291444e1af5196b44f5c7da368842..09db034558017e3d327960e8333d5defa35acdcb 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -731,58 +731,4 @@ static inline void 
mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) mddev->queue->limits.max_write_zeroes_sectors = 0; } - -/* Maximum size of each resync request */ -#define RESYNC_BLOCK_SIZE (64*1024) -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) - -/* for managing resync I/O pages */ -struct resync_pages { - unsigned idx; /* for get/put page from the pool */ - void *raid_bio; - struct page *pages[RESYNC_PAGES]; -}; - -static inline int resync_alloc_pages(struct resync_pages *rp, - gfp_t gfp_flags) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) { - rp->pages[i] = alloc_page(gfp_flags); - if (!rp->pages[i]) - goto out_free; - } - - return 0; - -out_free: - while (--i >= 0) - put_page(rp->pages[i]); - return -ENOMEM; -} - -static inline void resync_free_pages(struct resync_pages *rp) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) - put_page(rp->pages[i]); -} - -static inline void resync_get_all_pages(struct resync_pages *rp) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) - get_page(rp->pages[i]); -} - -static inline struct page *resync_fetch_page(struct resync_pages *rp, - unsigned idx) -{ - if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) - return NULL; - return rp->pages[idx]; -} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c new file mode 100644 index 0000000000000000000000000000000000000000..9f2670b45f31ae8b99b09f33309bf31c6f704c7c --- /dev/null +++ b/drivers/md/raid1-10.c @@ -0,0 +1,81 @@ +/* Maximum size of each resync request */ +#define RESYNC_BLOCK_SIZE (64*1024) +#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) + +/* for managing resync I/O pages */ +struct resync_pages { + void *raid_bio; + struct page *pages[RESYNC_PAGES]; +}; + +static inline int resync_alloc_pages(struct resync_pages *rp, - gfp_t gfp_flags) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) { + rp->pages[i] = alloc_page(gfp_flags); + if (!rp->pages[i]) + goto out_free; + } + + return 0; + +out_free: + while (--i >= 0) + put_page(rp->pages[i]); + return -ENOMEM; +} + +static inline void resync_free_pages(struct resync_pages *rp) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) + put_page(rp->pages[i]); +} + +static inline void resync_get_all_pages(struct resync_pages *rp) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) + get_page(rp->pages[i]); +} + +static inline struct page *resync_fetch_page(struct resync_pages *rp, + unsigned idx) +{ + if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) + return NULL; + return rp->pages[idx]; +} + +/* + * 'struct resync_pages' stores actual pages used for doing the resync + * IO, and it is per-bio, so make .bi_private point to it.
+ */ +static inline struct resync_pages *get_resync_pages(struct bio *bio) +{ + return bio->bi_private; +} + +/* generally called after bio_reset() for resetting bvec */ +static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp, + int size) +{ + int idx = 0; + + /* initialize bvec table again */ + do { + struct page *page = resync_fetch_page(rp, idx); + int len = min_t(int, size, PAGE_SIZE); + + /* + * won't fail because the vec table is big + * enough to hold all these pages + */ + bio_add_page(bio, page, len, 0); + size -= len; + } while (idx++ < RESYNC_PAGES && size > 0); +} diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 3febfc8391fbd381876930bf7a38447b9346bb48..f50958ded9f0c4dd9982c440b20d4e8854697b0d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); #define raid1_log(md, fmt, args...) \ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) -/* - * 'strct resync_pages' stores actual pages used for doing the resync - * IO, and it is per-bio, so make .bi_private points to it. - */ -static inline struct resync_pages *get_resync_pages(struct bio *bio) -{ - return bio->bi_private; -} +#include "raid1-10.c" /* * for resync bio, r1bio pointer can be retrieved from the per-bio @@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r1_bio; bio->bi_private = rp; } @@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - /* we release behind master bio when all write are done */ - if (r1_bio->behind_master_bio == bio) - to_put = NULL; - if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, +static void alloc_behind_master_bio(struct r1bio *r1_bio, struct bio *bio) { int size = bio->bi_iter.bi_size; @@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); if (!behind_bio) - goto fail; + return; /* discard op, we don't support writezero/writesame yet */ - if (!bio_has_data(bio)) + if (!bio_has_data(bio)) { + behind_bio->bi_iter.bi_size = size; goto skip_copy; + } while (i < vcnt && size) { struct page *page; @@ -1123,14 +1112,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, r1_bio->behind_master_bio = behind_bio;; set_bit(R1BIO_BehindIO, &r1_bio->state); - return behind_bio; + return; free_pages: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_iter.bi_size); bio_free_pages(behind_bio); -fail: - return behind_bio; + bio_put(behind_bio); } struct raid1_plug_cb { @@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { - mbio = alloc_behind_master_bio(r1_bio, bio); + 
alloc_behind_master_bio(r1_bio, bio); } bitmap_startwrite(bitmap, r1_bio->sector, @@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, first_clone = 0; } - if (!mbio) { - if (r1_bio->behind_master_bio) - mbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, - mddev->bio_set); - else - mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); - } + if (r1_bio->behind_master_bio) + mbio = bio_clone_fast(r1_bio->behind_master_bio, + GFP_NOIO, mddev->bio_set); + else + mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); if (r1_bio->behind_master_bio) { if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) @@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio) /* Fix variable parts of all bios */ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); for (i = 0; i < conf->raid_disks * 2; i++) { - int j; - int size; blk_status_t status; - struct bio_vec *bi; struct bio *b = r1_bio->bios[i]; struct resync_pages *rp = get_resync_pages(b); if (b->bi_end_io != end_sync_read) @@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio) status = b->bi_status; bio_reset(b); b->bi_status = status; - b->bi_vcnt = vcnt; - b->bi_iter.bi_size = r1_bio->sectors << 9; b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; @@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio) rp->raid_bio = r1_bio; b->bi_private = rp; - size = b->bi_iter.bi_size; - bio_for_each_segment_all(bi, b, j) { - bi->bv_offset = 0; - if (size > PAGE_SIZE) - bi->bv_len = PAGE_SIZE; - else - bi->bv_len = size; - size -= PAGE_SIZE; - } + /* initialize bvec table again */ + md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); } for (primary = 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io == end_sync_read && @@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) wbio = bio_clone_fast(r1_bio->behind_master_bio, GFP_NOIO, mddev->bio_set); - /* We really need a _all clone */ - wbio->bi_iter = (struct bvec_iter){ 0 }; } else { wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, mddev->bio_set); @@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int good_sectors = RESYNC_SECTORS; int min_bad = 0; /* number of sectors that are bad in all devices */ int idx = sector_to_idx(sector_nr); + int page_idx = 0; if (!conf->r1buf_pool) if (init_resync(conf)) @@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; rp = get_resync_pages(bio); if (bio->bi_end_io) { - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big @@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, nr_sectors += len>>9; sector_nr += len>>9; sync_blocks -= (len>>9); - } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r1_bio->sectors = nr_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5026e7ad51d3a8c50bc3803c32fed8c4e026a0ea..f55d4cc085f60daa6b25c9f398e462420dbc6c18 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf); #define raid10_log(md, fmt, args...) 
\ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) -/* - * 'strct resync_pages' stores actual pages used for doing the resync - * IO, and it is per-bio, so make .bi_private points to it. - */ -static inline struct resync_pages *get_resync_pages(struct bio *bio) -{ - return bio->bi_private; -} +#include "raid1-10.c" /* * for resync bio, r10bio pointer can be retrieved from the per-bio @@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r10_bio; bio->bi_private = rp; if (rbio) { @@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) rp = get_resync_pages(tbio); bio_reset(tbio); - tbio->bi_vcnt = vcnt; - tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; + md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); + rp->raid_bio = r10_bio; tbio->bi_private = rp; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; @@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sectors_skipped = 0; int chunks_skipped = 0; sector_t chunk_mask = conf->geo.chunk_mask; + int page_idx = 0; if (!conf->r10buf_pool) if (init_resync(conf)) @@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, break; for (bio= biolist ; bio ; bio=bio->bi_next) { struct resync_pages *rp = get_resync_pages(bio); - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big enough * to hold all these pages @@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, } nr_sectors += len>>9; sector_nr += len>>9; - } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r10_bio->sectors = nr_sectors; while (biolist) { diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index bfa1e907c472e49855f9e0abb4c307cd45514d4d..2dcbafa8e66ca7f5418fce8434fe9674977f0832 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -236,9 +236,10 @@ struct r5l_io_unit { bool need_split_bio; struct bio *split_bio; - unsigned int has_flush:1; /* include flush request */ - unsigned int has_fua:1; /* include fua request */ - unsigned int has_null_flush:1; /* include empty flush request */ + unsigned int has_flush:1; /* include flush request */ + unsigned int has_fua:1; /* include fua request */ + unsigned int has_null_flush:1; /* include null flush request */ + unsigned int has_flush_payload:1; /* include flush payload */ /* * io isn't sent yet, flush/fua request can only be submitted till it's * the first IO in running_ios list @@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio) struct 
r5l_io_unit *io_deferred; struct r5l_log *log = io->log; unsigned long flags; + bool has_null_flush; + bool has_flush_payload; if (bio->bi_status) md_error(log->rdev->mddev, log->rdev); @@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio) spin_lock_irqsave(&log->io_list_lock, flags); __r5l_set_io_unit_state(io, IO_UNIT_IO_END); + + /* + * if the io doesn't have null_flush or flush payload, + * it is not safe to access it after releasing io_list_lock. + * Therefore, it is necessary to check the condition with + * the lock held. + */ + has_null_flush = io->has_null_flush; + has_flush_payload = io->has_flush_payload; + if (log->need_cache_flush && !list_empty(&io->stripe_list)) r5l_move_to_end_ios(log); else @@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio) if (log->need_cache_flush) md_wakeup_thread(log->rdev->mddev->thread); - if (io->has_null_flush) { + /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ + if (has_null_flush) { struct bio *bi; WARN_ON(bio_list_empty(&io->flush_barriers)); while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { bio_endio(bi); - atomic_dec(&io->pending_stripe); + if (atomic_dec_and_test(&io->pending_stripe)) { + __r5l_stripe_write_finished(io); + return; + } } } - - /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ - if (atomic_read(&io->pending_stripe) == 0) - __r5l_stripe_write_finished(io); + /* decrease pending_stripe for flush payload */ + if (has_flush_payload) + if (atomic_dec_and_test(&io->pending_stripe)) + __r5l_stripe_write_finished(io); } static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) @@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) payload->size = cpu_to_le32(sizeof(__le64)); payload->flush_stripes[0] = cpu_to_le64(sect); io->meta_offset += meta_size; + /* multiple flush payloads count as one pending_stripe */ + if (!io->has_flush_payload) { + io->has_flush_payload = 1; + atomic_inc(&io->pending_stripe); + } mutex_unlock(&log->io_mutex); } @@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) */ int r5c_journal_mode_set(struct mddev *mddev, int mode) { - struct r5conf *conf = mddev->private; - struct r5l_log *log = conf->log; - - if (!log) - return -ENODEV; + struct r5conf *conf; + int err; if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || mode > R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; + err = mddev_lock(mddev); + if (err) + return err; + conf = mddev->private; + if (!conf || !conf->log) { + mddev_unlock(mddev); + return -ENODEV; + } + if (raid5_calc_degraded(conf) > 0 && - mode == R5C_JOURNAL_MODE_WRITE_BACK) + mode == R5C_JOURNAL_MODE_WRITE_BACK) { + mddev_unlock(mddev); return -EINVAL; + } mddev_suspend(mddev); conf->log->r5c_journal_mode = mode; mddev_resume(mddev); + mddev_unlock(mddev); pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", mdname(mddev), mode, r5c_journal_mode_str[mode]); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index aeeb8d6854e2610dd2e006aa419264415d950da7..0fc2748aaf95a48a4b1fb227d3b80bffe9cea8d5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; md_write_end(conf->mddev); - bio_endio(bi); + bio_io_error(bi); bi = nextbi; } if (bitmap_end) @@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct 
stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; md_write_end(conf->mddev); - bio_endio(bi); + bio_io_error(bi); bi = bi2; } @@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; - bio_endio(bi); + bio_io_error(bi); bi = nextbi; } } @@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); + + async_tx_issue_pending_all(); blk_finish_plug(&plug); pr_debug("--- raid5worker inactive\n"); diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index bf45977b2823b067076eb1e01f741af8dd424bdb..d596b601ff42a6efe27d9e3aa82c0e7521ba2856 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(cec_transmit_done); void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) { - switch (status) { + switch (status & ~CEC_TX_STATUS_MAX_RETRIES) { case CEC_TX_STATUS_OK: cec_transmit_done(adap, status, 0, 0, 0, 0); return; diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index 74dc1c32080eaf8d57d837fb93ab6587992e0483..08b619d0ea1ef7f24f93771471366a340517b764 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c @@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put); void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) { + if (n == NULL) + return; + mutex_lock(&n->lock); n->phys_addr = pa; if (n->callback) @@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, { u16 pa = CEC_PHYS_ADDR_INVALID; + if (n == NULL) + return; + if (edid && edid->extensions) pa = cec_get_edid_phys_addr((const u8 *)edid, EDID_LENGTH * (edid->extensions + 1), NULL); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index af694f2066a2f4b34f710952e02878783dab4e72..17970cdd55fa75c99e21481b1213c21461670ff3 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -349,7 +349,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) /* read the buffer size from the CAM */ if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) return ret; - if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) + ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ); + if (ret != 0) return ret; if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) return -EIO; @@ -644,72 +645,101 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, } buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); - if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { + if (buf_free < (ca->slot_info[slot].link_buf_size + + DVB_RINGBUFFER_PKTHDRSIZE)) { status = -EAGAIN; goto exit; } } - /* check if there is data available */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) - goto exit; - if (!(status & STATUSREG_DA)) { - /* no data */ - status = 0; - goto exit; - } - - /* read the amount of data */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0) - goto exit; - bytes_read = status << 8; - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0) - goto exit; - 
bytes_read |= status; + if (ca->pub->read_data && + (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) { + if (ebuf == NULL) + status = ca->pub->read_data(ca->pub, slot, buf, + sizeof(buf)); + else + status = ca->pub->read_data(ca->pub, slot, buf, ecount); + if (status < 0) + return status; + bytes_read = status; + if (status == 0) + goto exit; + } else { - /* check it will fit */ - if (ebuf == NULL) { - if (bytes_read > ca->slot_info[slot].link_buf_size) { - pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", - ca->dvbdev->adapter->num, bytes_read, - ca->slot_info[slot].link_buf_size); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; + /* check if there is data available */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_STATUS); + if (status < 0) goto exit; - } - if (bytes_read < 2) { - pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; + if (!(status & STATUSREG_DA)) { + /* no data */ + status = 0; goto exit; } - } else { - if (bytes_read > ecount) { - pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", - ca->dvbdev->adapter->num); - status = -EIO; + + /* read the amount of data */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_SIZE_HIGH); + if (status < 0) + goto exit; + bytes_read = status << 8; + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_SIZE_LOW); + if (status < 0) goto exit; + bytes_read |= status; + + /* check it will fit */ + if (ebuf == NULL) { + if (bytes_read > ca->slot_info[slot].link_buf_size) { + pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", + ca->dvbdev->adapter->num, bytes_read, + ca->slot_info[slot].link_buf_size); + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + if (bytes_read < 2) { + pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + } else { + if (bytes_read > ecount) { + pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", + ca->dvbdev->adapter->num); + status = -EIO; + goto exit; + } } - } - /* fill the buffer */ - for (i = 0; i < bytes_read; i++) { - /* read byte and check */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0) - goto exit; + /* fill the buffer */ + for (i = 0; i < bytes_read; i++) { + /* read byte and check */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_DATA); + if (status < 0) + goto exit; - /* OK, store it in the buffer */ - buf[i] = status; - } + /* OK, store it in the buffer */ + buf[i] = status; + } - /* check for read error (RE should now be 0) */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) - goto exit; - if (status & STATUSREG_RE) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; - goto exit; + /* check for read error (RE should now be 0) */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_STATUS); + if (status < 0) + goto exit; + if (status & STATUSREG_RE) { + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } } /* OK, add it to the receive buffer, or copy into external buffer if supplied 
*/ @@ -762,6 +792,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, if (bytes_write > ca->slot_info[slot].link_buf_size) return -EINVAL; + if (ca->pub->write_data && + (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) + return ca->pub->write_data(ca->pub, slot, buf, bytes_write); + /* it is possible we are dealing with a single buffer implementation, thus if there is data available for read or if there is even a read already in progress, we do nothing but awake the kernel thread to @@ -1176,7 +1210,8 @@ static int dvb_ca_en50221_thread(void *data) pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_UNINITIALISED; dvb_ca_en50221_thread_update_delay(ca); break; } diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h index 1e4bbbd34d911fc5f22b2421f3b45a385c094c07..82617bac08756ead4992822f563c6fa10333042d 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.h +++ b/drivers/media/dvb-core/dvb_ca_en50221.h @@ -41,6 +41,8 @@ * @write_attribute_mem: function for writing attribute memory on the CAM * @read_cam_control: function for reading the control interface on the CAM * @write_cam_control: function for reading the control interface on the CAM + * @read_data: function for reading data (block mode) + * @write_data: function for writing data (block mode) * @slot_reset: function to reset the CAM slot * @slot_shutdown: function to shutdown a CAM slot * @slot_ts_enable: function to enable the Transport Stream on a CAM slot @@ -66,6 +68,11 @@ struct dvb_ca_en50221 { int (*write_cam_control)(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value); + int (*read_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*write_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 08f67d60a7d9f644b7d59fd847036ae4a8224f13..12bff778c97f67c23f1c451ff050707686266824 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -3279,7 +3279,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe, else if (priv->state == STATE_ACTIVE_TC) cxd2841er_read_status_tc(fe, &status); - cxd2841er_read_signal_strength(fe); + if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S) + cxd2841er_read_signal_strength(fe); + else + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; if (status & FE_HAS_LOCK) { cxd2841er_read_snr(fe); diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 4442e478db72a2420207efc2deca49d56c92c30c..cd69e187ba7a9c0eea95855531a82db3e09a528b 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h @@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, * \def DRX_UNKNOWN * \brief Generic UNKNOWN value for DRX enumerated types. * -* Used to indicate that the parameter value is unknown or not yet initalized. +* Used to indicate that the parameter value is unknown or not yet initialized. 
*/ #ifndef DRX_UNKNOWN #define DRX_UNKNOWN (254) @@ -449,19 +449,6 @@ MACROS #define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \ ((u8)((((u16)x)>>8)&0xFF)) -/** -* \brief Macro to sign extend signed 9 bit value to signed 16 bit value -*/ -#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x)) - -/** -* \brief Macro to sign extend signed 9 bit value to signed 16 bit value -*/ -#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \ - ((s32) \ - (((u32) x) | 0xFF000000)) : \ - ((s32) x)) - /** * \brief Macro to convert 16 bit register value to a s32 */ diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c index ef3021e964be89e61eeda59daa38d1e71fa7320c..cb486e879fdd3f50925dd8431a4d0a4de0e09d4d 100644 --- a/drivers/media/dvb-frontends/lnbh25.c +++ b/drivers/media/dvb-frontends/lnbh25.c @@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv) return ret; } } - print_hex_dump_bytes("lnbh25_read_vmon: ", - DUMP_PREFIX_OFFSET, status, sizeof(status)); + dev_dbg(&priv->i2c->dev, "%s(): %*ph\n", + __func__, (int) sizeof(status), status); if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) { dev_err(&priv->i2c->dev, "%s(): voltage in failure state, status reg 0x%x\n", @@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe, fe->ops.release_sec = lnbh25_release; fe->ops.set_voltage = lnbh25_set_voltage; - dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", + dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", __func__, priv->i2c_address); return fe; } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index e726c2e004601988a4af69559d4dbbead03d286b..8ac0f598978dacb8ed441bf6cc816a8b6bd5a463 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -25,6 +25,8 @@ #include #include +#include "dvb_math.h" + #include "stv0367.h" #include "stv0367_defs.h" #include "stv0367_regs.h" @@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe, return 0; } -static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) +static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe) { struct stv0367_state *state = fe->demodulator_priv; u32 snru32 = 0; @@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) cpt++; } - snru32 /= 10;/*average on 10 values*/ - *snr = snru32 / 1000; + return snru32; +} + +static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + u32 snrval = stv0367ter_snr_readreg(fe); + + *snr = snrval / 1000; return 0; } @@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe, *status = 0; if (stv0367_readbits(state, F367TER_LK)) { - *status |= FE_HAS_LOCK; + *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI + | FE_HAS_SYNC | FE_HAS_LOCK; dprintk("%s: stv0367 has locked\n", __func__); } @@ -2149,6 +2158,18 @@ static int stv0367cab_read_status(struct dvb_frontend *fe, *status = 0; + if (state->cab_state->state > FE_CAB_NOSIGNAL) + *status |= FE_HAS_SIGNAL; + + if (state->cab_state->state > FE_CAB_NOCARRIER) + *status |= FE_HAS_CARRIER; + + if (state->cab_state->state >= FE_CAB_DEMODOK) + *status |= FE_HAS_VITERBI; + + if (state->cab_state->state >= FE_CAB_DATAOK) + *status |= FE_HAS_SYNC; + if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? 
state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { *status |= FE_HAS_LOCK; @@ -2702,51 +2723,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength) return 0; } -static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) +static int stv0367cab_snr_power(struct dvb_frontend *fe) { struct stv0367_state *state = fe->demodulator_priv; - u32 noisepercentage; enum stv0367cab_mod QAMSize; - u32 regval = 0, temp = 0; - int power, i; QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE); switch (QAMSize) { case FE_CAB_MOD_QAM4: - power = 21904; - break; + return 21904; case FE_CAB_MOD_QAM16: - power = 20480; - break; + return 20480; case FE_CAB_MOD_QAM32: - power = 23040; - break; + return 23040; case FE_CAB_MOD_QAM64: - power = 21504; - break; + return 21504; case FE_CAB_MOD_QAM128: - power = 23616; - break; + return 23616; case FE_CAB_MOD_QAM256: - power = 21760; - break; - case FE_CAB_MOD_QAM512: - power = 1; - break; + return 21760; case FE_CAB_MOD_QAM1024: - power = 21280; - break; + return 21280; default: - power = 1; break; } + return 1; +} + +static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv) +{ + struct stv0367_state *state = fe->demodulator_priv; + u32 regval = 0; + int i; + for (i = 0; i < 10; i++) { regval += (stv0367_readbits(state, F367CAB_SNR_LO) + 256 * stv0367_readbits(state, F367CAB_SNR_HI)); } - regval /= 10; /*for average over 10 times in for loop above*/ + if (avgdiv) + regval /= 10; + + return regval; +} + +static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + struct stv0367_state *state = fe->demodulator_priv; + u32 noisepercentage; + u32 regval = 0, temp = 0; + int power; + + power = stv0367cab_snr_power(fe); + regval = stv0367cab_snr_readreg(fe, 1); + if (regval != 0) { temp = power * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER))); @@ -2980,21 +3011,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe) return -EINVAL; } +static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + s32 signalstrength; + + switch (state->activedemod) { + case demod_cab: + signalstrength = stv0367cab_get_rf_lvl(state) * 1000; + break; + default: + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->strength.stat[0].scale = FE_SCALE_DECIBEL; + p->strength.stat[0].uvalue = signalstrength; +} + +static void stv0367ddb_read_snr(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + int cab_pwr; + u32 regval, tmpval, snrval = 0; + + switch (state->activedemod) { + case demod_ter: + snrval = stv0367ter_snr_readreg(fe); + break; + case demod_cab: + cab_pwr = stv0367cab_snr_power(fe); + regval = stv0367cab_snr_readreg(fe, 0); + + /* prevent division by zero */ + if (!regval) { + snrval = 0; + break; + } + + tmpval = (cab_pwr * 320) / regval; + snrval = ((tmpval != 0) ? 
(intlog2(tmpval) / 5581) : 0); + break; + default: + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->cnr.stat[0].scale = FE_SCALE_DECIBEL; + p->cnr.stat[0].uvalue = snrval; +} + +static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + u32 ucblocks = 0; + + switch (state->activedemod) { + case demod_ter: + stv0367ter_read_ucblocks(fe, &ucblocks); + break; + case demod_cab: + stv0367cab_read_ucblcks(fe, &ucblocks); + break; + default: + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->block_error.stat[0].scale = FE_SCALE_COUNTER; + p->block_error.stat[0].uvalue = ucblocks; +} + static int stv0367ddb_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + int ret; switch (state->activedemod) { case demod_ter: - return stv0367ter_read_status(fe, status); + ret = stv0367ter_read_status(fe, status); + break; case demod_cab: - return stv0367cab_read_status(fe, status); - default: + ret = stv0367cab_read_status(fe, status); break; + default: + return 0; } - return -EINVAL; + /* stop and report on *_read_status failure */ + if (ret) + return ret; + + stv0367ddb_read_signal_strength(fe); + + /* read carrier/noise when a carrier is detected */ + if (*status & FE_HAS_CARRIER) + stv0367ddb_read_snr(fe); + else + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + /* read uncorrected blocks on FE_HAS_LOCK */ + if (*status & FE_HAS_LOCK) + stv0367ddb_read_ucblocks(fe); + else + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + return 0; } static int stv0367ddb_get_frontend(struct dvb_frontend *fe, @@ -3035,6 +3162,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe) static int stv0367ddb_init(struct stv0367_state *state) { struct stv0367ter_state *ter_state = state->ter_state; + struct dtv_frontend_properties *p = &state->fe.dtv_property_cache; stv0367_writereg(state, R367TER_TOPCTRL, 0x10); @@ -3109,6 +3237,13 @@ static int stv0367ddb_init(struct stv0367_state *state) ter_state->first_lock = 0; ter_state->unlock_counter = 2; + p->strength.len = 1; + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->cnr.len = 1; + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->block_error.len = 1; + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return 0; } @@ -3126,15 +3261,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = { 0x400 |/* FE_CAN_QAM_4 */ FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | - FE_CAN_QAM_256 | FE_CAN_FEC_AUTO | + FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | /* DVB-T */ - FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | - FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | - FE_CAN_FEC_AUTO | - FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | - FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | - FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER | - FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | + FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | + FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO | + FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO | FE_CAN_MUTE_TS }, .release = stv0367_release, diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index 6e313d5243a0352791b7cd2117da5e0aaa8990a7..f39f5179dd95792b5414a3132b8ccdf730abaded 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ 
b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -1496,7 +1496,6 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table); static const struct dev_pm_ops et8ek8_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume) }; -MODULE_DEVICE_TABLE(of, et8ek8_of_table); static struct i2c_driver et8ek8_i2c_driver = { .driver = { diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 9da4bf4f2c7a73222043088ba68be4e2c0051108..7b79a7498751981e424bbe4d515f3463c2953975 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; u8 reg; - int pos=0; + int pos = 0; if (std == V4L2_STD_ALL) { dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); @@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, line += 3; } - if (line<6||line>27) + if (line < 6 || line > 27) return 0; - while (regs->reg != (u16)-1 ) { + while (regs->reg != (u16)-1) { if ((type & regs->type.vbi_type) && - (line>=regs->type.ini_line) && - (line<=regs->type.end_line)) { - type=regs->type.vbi_type; + (line >= regs->type.ini_line) && + (line <= regs->type.end_line)) break; - } regs++; pos++; } + if (regs->reg == (u16)-1) return 0; - type=pos | (flags & 0xf0); - reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; + type = pos | (flags & 0xf0); + reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; - if (fields&1) { + if (fields & 1) tvp5150_write(sd, reg, type); - } - if (fields&2) { - tvp5150_write(sd, reg+1, type); - } + if (fields & 2) + tvp5150_write(sd, reg + 1, type); return type; } diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 9420479bee9a5ad324b0344541e0905495cfe91c..cd1723e79a078a650e6777206ffc417d9c83f4bb 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -17,6 +17,8 @@ * http://www.gnu.org/copyleft/gpl.html */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -114,6 +116,19 @@ static int i2c_write_reg(struct i2c_adapter *adap, u8 adr, return i2c_write(adap, adr, msg, 2); } +static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr) +{ + u32 val = ddbreadl(adr); + + /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */ + if (val == ~0) { + dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr); + return 0; + } + + return val; +} + static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) { struct ddb *dev = i2c->dev; @@ -124,10 +139,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); if (stat == 0) { - printk(KERN_ERR "I2C timeout\n"); + dev_err(&dev->pdev->dev, "I2C timeout\n"); { /* MSI debugging*/ u32 istat = ddbreadl(INTERRUPT_STATUS); - printk(KERN_ERR "IRS %08x\n", istat); + dev_err(&dev->pdev->dev, "IRS %08x\n", istat); ddbwritel(istat, INTERRUPT_ACK); } return -EIO; @@ -533,7 +548,7 @@ static u32 ddb_input_avail(struct ddb_input *input) off = (stat & 0x7ff) << 7; if (ctrl & 4) { - printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl); + dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl); ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); return 0; } @@ -611,6 +626,7 @@ static int demod_attach_drxk(struct ddb_input *input) struct i2c_adapter *i2c = &input->port->i2c->adap; struct dvb_frontend *fe; struct drxk_config 
config; + struct device *dev = &input->port->dev->pdev->dev; memset(&config, 0, sizeof(config)); config.microcode_name = "drxk_a3.mc"; @@ -619,7 +635,7 @@ static int demod_attach_drxk(struct ddb_input *input) fe = input->fe = dvb_attach(drxk_attach, &config, i2c); if (!input->fe) { - printk(KERN_ERR "No DRXK found!\n"); + dev_err(dev, "No DRXK found!\n"); return -ENODEV; } fe->sec_priv = input; @@ -632,12 +648,13 @@ static int tuner_attach_tda18271(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; struct dvb_frontend *fe; + struct device *dev = &input->port->dev->pdev->dev; if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 1); fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); if (!fe) { - printk(KERN_ERR "No TDA18271 found!\n"); + dev_err(dev, "No TDA18271 found!\n"); return -ENODEV; } if (input->fe->ops.i2c_gate_ctrl) @@ -670,13 +687,14 @@ static struct stv0367_config ddb_stv0367_config[] = { static int demod_attach_stv0367(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; /* attach frontend */ input->fe = dvb_attach(stv0367ddb_attach, &ddb_stv0367_config[(input->nr & 1)], i2c); if (!input->fe) { - printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n"); + dev_err(dev, "stv0367ddb_attach failed (not found?)\n"); return -ENODEV; } @@ -690,17 +708,19 @@ static int demod_attach_stv0367(struct ddb_input *input) static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) { struct i2c_adapter *adapter = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; + u8 tda_id[2]; u8 subaddr = 0x00; - printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n"); + dev_dbg(dev, "stv0367-tda18212 tuner ping\n"); if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 1); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) - printk(KERN_DEBUG "tda18212 ping 1 fail\n"); + dev_dbg(dev, "tda18212 ping 1 fail\n"); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) - printk(KERN_DEBUG "tda18212 ping 2 fail\n"); + dev_warn(dev, "tda18212 ping failed, expect problems\n"); if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 0); @@ -711,6 +731,7 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct cxd2841er_config cfg; /* the cxd2841er driver expects 8bit/shifted I2C addresses */ @@ -728,7 +749,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); if (!input->fe) { - printk(KERN_ERR "No Sony CXD28xx found!\n"); + dev_err(dev, "No Sony CXD28xx found!\n"); return -ENODEV; } @@ -742,6 +763,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) { struct i2c_adapter *adapter = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct i2c_client *client; struct tda18212_config config = { .fe = input->fe, @@ -786,7 +808,7 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) return 0; err: - printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n"); + dev_warn(dev, "TDA18212 tuner not found. 
Device is not fully operational.\n"); return -ENODEV; } @@ -847,19 +869,20 @@ static struct stv6110x_config stv6110b = { static int demod_attach_stv0900(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; input->fe = dvb_attach(stv090x_attach, feconf, i2c, (input->nr & 1) ? STV090x_DEMODULATOR_1 : STV090x_DEMODULATOR_0); if (!input->fe) { - printk(KERN_ERR "No STV0900 found!\n"); + dev_err(dev, "No STV0900 found!\n"); return -ENODEV; } if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, 0, (input->nr & 1) ? (0x09 - type) : (0x0b - type))) { - printk(KERN_ERR "No LNBH24 found!\n"); + dev_err(dev, "No LNBH24 found!\n"); return -ENODEV; } return 0; @@ -868,6 +891,7 @@ static int demod_attach_stv0900(struct ddb_input *input, int type) static int tuner_attach_stv6110(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; struct stv6110x_config *tunerconf = (input->nr & 1) ? &stv6110b : &stv6110a; @@ -875,10 +899,10 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type) ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); if (!ctl) { - printk(KERN_ERR "No STV6110X found!\n"); + dev_err(dev, "No STV6110X found!\n"); return -ENODEV; } - printk(KERN_INFO "attach tuner input %d adr %02x\n", + dev_info(dev, "attach tuner input %d adr %02x\n", input->nr, tunerconf->addr); feconf->tuner_init = ctl->tuner_init; @@ -1009,13 +1033,14 @@ static int dvb_input_attach(struct ddb_input *input) struct ddb_port *port = input->port; struct dvb_adapter *adap = &input->adap; struct dvb_demux *dvbdemux = &input->demux; + struct device *dev = &input->port->dev->pdev->dev; int sony_osc24 = 0, sony_tspar = 0; ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, &input->port->dev->pdev->dev, adapter_nr); if (ret < 0) { - printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n"); + dev_err(dev, "Could not register adapter. 
Check if you enabled enough adapters in dvb-core!\n"); return ret; } input->attached = 1; @@ -1241,9 +1266,9 @@ static void input_tasklet(unsigned long data) if (input->port->class == DDB_PORT_TUNER) { if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) - printk(KERN_ERR "Overflow input %d\n", input->nr); + dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr); while (input->cbuf != ((input->stat >> 11) & 0x1f) - || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) { + || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) { dvb_dmx_swfilter_packets(&input->demux, input->vbuf[input->cbuf], input->dma_buf_size / 188); @@ -1280,6 +1305,7 @@ static struct cxd2099_cfg cxd_cfg = { .adr = 0x40, .polarity = 1, .clock_mode = 1, + .max_i2c = 512, }; static int ddb_ci_attach(struct ddb_port *port) @@ -1310,6 +1336,7 @@ static int ddb_ci_attach(struct ddb_port *port) static int ddb_port_attach(struct ddb_port *port) { + struct device *dev = &port->dev->pdev->dev; int ret = 0; switch (port->class) { @@ -1326,7 +1353,7 @@ static int ddb_port_attach(struct ddb_port *port) break; } if (ret < 0) - printk(KERN_ERR "port_attach on port %d failed\n", port->nr); + dev_err(dev, "port_attach on port %d failed\n", port->nr); return ret; } @@ -1377,6 +1404,7 @@ static void ddb_ports_detach(struct ddb *dev) static int init_xo2(struct ddb_port *port) { struct i2c_adapter *i2c = &port->i2c->adap; + struct device *dev = &port->dev->pdev->dev; u8 val, data[2]; int res; @@ -1385,7 +1413,7 @@ static int init_xo2(struct ddb_port *port) return res; if (data[0] != 0x01) { - pr_info("Port %d: invalid XO2\n", port->nr); + dev_info(dev, "Port %d: invalid XO2\n", port->nr); return -1; } @@ -1511,7 +1539,7 @@ static void ddb_port_probe(struct ddb_port *port) port->class = DDB_PORT_CI; ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { - printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n", + dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n", port->nr, port->nr+1, xo2_type, xo2_id); ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); @@ -1556,10 +1584,10 @@ static void ddb_port_probe(struct ddb_port *port) } break; case DDB_XO2_TYPE_CI: - printk(KERN_INFO "DuoFlex CI modules not supported\n"); + dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n"); break; default: - printk(KERN_INFO "Unknown XO2 DuoFlex module\n"); + dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n"); break; } } else if (port_has_cxd28xx(port, &cxd_id)) { @@ -1611,7 +1639,7 @@ static void ddb_port_probe(struct ddb_port *port) ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); } - printk(KERN_INFO "Port %d (TAB %d): %s\n", + dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n", port->nr, port->nr+1, modname); } @@ -1765,7 +1793,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) wbuf += 4; wlen -= 4; ddbwritel(data, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; } @@ -1785,7 +1813,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) if (shift) data <<= shift; ddbwritel(data, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; if (!rlen) { @@ -1797,7 +1825,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) while (rlen > 4) { ddbwritel(0xffffffff, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; data = 
ddbreadl(SPI_DATA); *(u32 *) rbuf = swab32(data); @@ -1806,7 +1834,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) } ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); ddbwritel(0xffffffff, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; data = ddbreadl(SPI_DATA); @@ -1993,7 +2021,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->pdev = pdev; pci_set_drvdata(pdev, dev); dev->info = (struct ddb_info *) id->driver_data; - printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name); + dev_info(&pdev->dev, "Detected %s\n", dev->info->name); dev->regs = ioremap(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -2001,13 +2029,13 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) stat = -ENOMEM; goto fail; } - printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); + dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); #ifdef CONFIG_PCI_MSI if (pci_msi_enabled()) stat = pci_enable_msi(dev->pdev); if (stat) { - printk(KERN_INFO ": MSI not available.\n"); + dev_info(&pdev->dev, "MSI not available.\n"); } else { irq_flag = 0; dev->msi = 1; @@ -2040,7 +2068,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail1; ddb_ports_init(dev); if (ddb_buffers_alloc(dev) < 0) { - printk(KERN_INFO ": Could not allocate buffer memory\n"); + dev_err(&pdev->dev, "Could not allocate buffer memory\n"); goto fail2; } if (ddb_ports_attach(dev) < 0) @@ -2050,19 +2078,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) fail3: ddb_ports_detach(dev); - printk(KERN_ERR "fail3\n"); + dev_err(&pdev->dev, "fail3\n"); ddb_ports_release(dev); fail2: - printk(KERN_ERR "fail2\n"); + dev_err(&pdev->dev, "fail2\n"); ddb_buffers_free(dev); fail1: - printk(KERN_ERR "fail1\n"); + dev_err(&pdev->dev, "fail1\n"); if (dev->msi) pci_disable_msi(dev->pdev); if (stat == 0) free_irq(dev->pdev->irq, dev); fail: - printk(KERN_ERR "fail\n"); + dev_err(&pdev->dev, "fail\n"); ddb_unmap(dev); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); @@ -2242,7 +2270,7 @@ static __init int module_init_ddbridge(void) { int ret; - printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); + pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); ret = ddb_class_create(); if (ret < 0) diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index ce69e648b663a934f5ace1c834e8c0b1642d044a..8c92cb7f7e72f7af8ca6411ec60db81c8b95bbaa 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com) { int result; - down(&dev->cmd_mutex); + mutex_lock(&dev->cmd_mutex); result = ngene_command_mutex(dev, com); - up(&dev->cmd_mutex); + mutex_unlock(&dev->cmd_mutex); return result; } @@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, u16 BsSPI = ((stream & 1) ? 
0x9800 : 0x9700); u16 BsSDO = 0x9B00; - down(&dev->stream_mutex); memset(&com, 0, sizeof(com)); com.cmd.hdr.Opcode = CMD_CONTROL; com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2; @@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, chan->State = KSSTATE_ACQUIRE; chan->HWState = HWSTATE_STOP; spin_unlock_irq(&chan->state_lock); - if (ngene_command(dev, &com) < 0) { - up(&dev->stream_mutex); + if (ngene_command(dev, &com) < 0) return -1; - } /* clear_buffers(chan); */ flush_buffers(chan); - up(&dev->stream_mutex); return 0; } spin_unlock_irq(&chan->state_lock); - up(&dev->stream_mutex); return 0; } @@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, chan->HWState = HWSTATE_STARTUP; spin_unlock_irq(&chan->state_lock); - if (ngene_command(dev, &com) < 0) { - up(&dev->stream_mutex); + if (ngene_command(dev, &com) < 0) return -1; - } - up(&dev->stream_mutex); + return 0; } @@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state) /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n", ngreadl(0x9310)); */ + mutex_lock(&dev->stream_mutex); ret = ngene_command_stream_control(dev, chan->number, control, mode, flags); + mutex_unlock(&dev->stream_mutex); + if (!ret) chan->running = state; else @@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev) static void ngene_stop(struct ngene *dev) { - down(&dev->cmd_mutex); + mutex_destroy(&dev->cmd_mutex); i2c_del_adapter(&(dev->channel[0].i2c_adapter)); i2c_del_adapter(&(dev->channel[1].i2c_adapter)); ngwritel(0, NGENE_INT_ENABLE); @@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev) init_waitqueue_head(&dev->cmd_wq); init_waitqueue_head(&dev->tx_wq); init_waitqueue_head(&dev->rx_wq); - sema_init(&dev->cmd_mutex, 1); - sema_init(&dev->stream_mutex, 1); + mutex_init(&dev->cmd_mutex); + mutex_init(&dev->stream_mutex); sema_init(&dev->pll_mutex, 1); - sema_init(&dev->i2c_switch_mutex, 1); + mutex_init(&dev->i2c_switch_mutex); spin_lock_init(&dev->cmd_lock); for (i = 0; i < MAX_STREAM; i++) spin_lock_init(&dev->channel[i].state_lock); @@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev) com.in_len = 3; com.out_len = 1; - down(&dev->cmd_mutex); + mutex_lock(&dev->cmd_mutex); ngwritel(0, NGENE_INT_ENABLE); ngene_command_mutex(dev, &com); - up(&dev->cmd_mutex); + mutex_unlock(&dev->cmd_mutex); } void ngene_shutdown(struct pci_dev *pdev) diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c index cf39fcf54adfb812ca691caa68e09ddcbb434947..fbf36353c7014b9f3424a850585b82d147177e3a 100644 --- a/drivers/media/pci/ngene/ngene-i2c.c +++ b/drivers/media/pci/ngene/ngene-i2c.c @@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, (struct ngene_channel *)i2c_get_adapdata(adapter); struct ngene *dev = chan->dev; - down(&dev->i2c_switch_mutex); + mutex_lock(&dev->i2c_switch_mutex); ngene_i2c_set_bus(dev, chan->number); if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) @@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, msg[0].buf, msg[0].len, 0)) goto done; - up(&dev->i2c_switch_mutex); + mutex_unlock(&dev->i2c_switch_mutex); return -EIO; done: - up(&dev->i2c_switch_mutex); + mutex_unlock(&dev->i2c_switch_mutex); return num; } diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 10d8f74c4f0a50a4f282b0211b8bb0b616919d5a..7c7cd217333d8af262e802081fb775ea4db2f7b4 100644 --- 
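
The ngene hunks above replace binary semaphores that only ever provided mutual exclusion (cmd_mutex, stream_mutex, i2c_switch_mutex) with struct mutex, and move the stream_mutex locking out of ngene_command_stream_control() into its caller set_transfer(). A minimal sketch of the resulting pattern (illustration only; example_dev/example_command/example_setup are made-up names, not driver symbols):

#include <linux/mutex.h>

/* Illustration only -- these are not ngene symbols. */
struct example_dev {
	struct mutex cmd_mutex;		/* was: struct semaphore */
};

static void example_setup(struct example_dev *dev)
{
	mutex_init(&dev->cmd_mutex);	/* was: sema_init(&dev->cmd_mutex, 1) */
}

static int example_command(struct example_dev *dev)
{
	int ret;

	mutex_lock(&dev->cmd_mutex);	/* was: down(&dev->cmd_mutex) */
	ret = 0;			/* ... issue command, wait for completion ... */
	mutex_unlock(&dev->cmd_mutex);	/* was: up(&dev->cmd_mutex) */

	return ret;
}
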
a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -762,10 +762,10 @@ struct ngene { wait_queue_head_t cmd_wq; int cmd_done; - struct semaphore cmd_mutex; - struct semaphore stream_mutex; + struct mutex cmd_mutex; + struct mutex stream_mutex; struct semaphore pll_mutex; - struct semaphore i2c_switch_mutex; + struct mutex i2c_switch_mutex; int i2c_current_channel; int i2c_current_bus; spinlock_t cmd_lock; diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c index 2a044be729da278ad8e2b40dc90039a05968900a..e7bd2b8484e3dea151da347a9c369eedbabacfe5 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c @@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv, switch (input->std) { default: WARN_ON_ONCE(1); + return -EINVAL; case STD_NTSC: f->fmt.pix.height = 480; break; diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 1313cd5334360342d776cee365c53985c82599d4..fb1fa0b82077fa61b808ac515968bd14286097b5 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -475,8 +475,8 @@ config VIDEO_QCOM_VENUS tristate "Qualcomm Venus V4L2 encoder/decoder driver" depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST - select QCOM_MDT_LOADER if (ARM || ARM64) - select QCOM_SCM if (ARM || ARM64) + select QCOM_MDT_LOADER if ARCH_QCOM + select QCOM_SCM if ARCH_QCOM select VIDEOBUF2_DMA_SG select V4L2_MEM2MEM_DEV ---help--- diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 25cbf9e5ac5a749bf4a8a7c2f47a6b36cd6843cd..bba1eb43b5d83ca7dd908ac9834a6f2975869476 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -393,8 +393,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, int ret; int i; - if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || - ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) { + if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || + ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) { width = round_up(q_data->width, 16); height = round_up(q_data->height, 16); } else { @@ -2198,7 +2198,7 @@ static void coda_finish_decode(struct coda_ctx *ctx) ctx->display_idx = display_idx; } -static void coda_error_decode(struct coda_ctx *ctx) +static void coda_decode_timeout(struct coda_ctx *ctx) { struct vb2_v4l2_buffer *dst_buf; @@ -2223,7 +2223,7 @@ const struct coda_context_ops coda_bit_decode_ops = { .start_streaming = coda_start_decoding, .prepare_run = coda_prepare_decode, .finish_run = coda_finish_decode, - .error_run = coda_error_decode, + .run_timeout = coda_decode_timeout, .seq_end_work = coda_seq_end_work, .release = coda_bit_release, }; diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index f92cc7df58fb8e64d039217dcb5fcba5fe60d479..829c7895a98a2b60efcca128075017ea0f988403 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1164,8 +1164,8 @@ static void coda_pic_run_work(struct work_struct *work) coda_hw_reset(ctx); - if (ctx->ops->error_run) - ctx->ops->error_run(ctx); + if (ctx->ops->run_timeout) + ctx->ops->run_timeout(ctx); } else if (!ctx->aborting) { ctx->ops->finish_run(ctx); } diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 40fe22f0d757327f03b8e9dce43546a745da9b6a..c5f504d8cf67f8f89350d29db3aeb9bfaa3ad15d 100644 
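
The tw5864 hunk above adds a return after the WARN_ON_ONCE() so that an unrecognized video standard no longer falls through into the STD_NTSC case. A small stand-alone illustration of the control flow (hedged sketch; the enum and function names are invented for the example):

/* Sketch only; "example_std"/"example_height" are invented for illustration. */
enum example_std { EX_STD_UNKNOWN, EX_STD_NTSC, EX_STD_PAL };

static int example_height(enum example_std std)
{
	switch (std) {
	default:
		/*
		 * Without this return the default label would fall through
		 * into the NTSC case and silently report 480 lines for an
		 * unknown standard -- exactly what the added -EINVAL avoids.
		 */
		return -EINVAL;
	case EX_STD_NTSC:
		return 480;
	case EX_STD_PAL:
		return 576;
	}
}
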
--- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ -183,7 +183,7 @@ struct coda_context_ops { int (*start_streaming)(struct coda_ctx *ctx); int (*prepare_run)(struct coda_ctx *ctx); void (*finish_run)(struct coda_ctx *ctx); - void (*error_run)(struct coda_ctx *ctx); + void (*run_timeout)(struct coda_ctx *ctx); void (*seq_end_work)(struct work_struct *work); void (*release)(struct coda_ctx *ctx); }; diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h index 8f6688a7a111cd0a5dbf9cf7370f9b97a1fcde52..f1b521045d6418d9cde3946490e3e91c7243619c 100644 --- a/drivers/media/platform/davinci/ccdc_hw_device.h +++ b/drivers/media/platform/davinci/ccdc_hw_device.h @@ -42,16 +42,6 @@ struct ccdc_hw_ops { int (*set_hw_if_params) (struct vpfe_hw_if_param *param); /* get interface parameters */ int (*get_hw_if_params) (struct vpfe_hw_if_param *param); - /* - * Pointer to function to set parameters. Used - * for implementing VPFE_S_CCDC_PARAMS - */ - int (*set_params) (void *params); - /* - * Pointer to function to get parameter. Used - * for implementing VPFE_G_CCDC_PARAMS - */ - int (*get_params) (void *params); /* Pointer to function to configure ccdc */ int (*configure) (void); diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c index 73db166dc338e5b352e9718950fe1739e42115e7..6d492dc4c3a9cb0029d16b23eecc86dc3b4db230 100644 --- a/drivers/media/platform/davinci/dm355_ccdc.c +++ b/drivers/media/platform/davinci/dm355_ccdc.c @@ -17,12 +17,7 @@ * This module is for configuring DM355 CCD controller of VPFE to capture * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to - * pre-process the Bayer RGB data, before writing it to SDRAM. This - * module also allows application to configure individual - * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. - * To do so, application include dm355_ccdc.h and vpfe_capture.h header - * files. The setparams() API is called by vpfe_capture driver - * to configure module parameters + * pre-process the Bayer RGB data, before writing it to SDRAM. 
* * TODO: 1) Raw bayer parameter settings and bayer capture * 2) Split module parameter structure to module specific ioctl structs @@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win, dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); } -static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) -{ - if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT || - ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) { - dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n"); - return -EINVAL; - } - - if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 || - ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) { - dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n"); - return -EINVAL; - } - - if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 || - ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) { - dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n"); - return -EINVAL; - } - - if ((ccdcparam->med_filt_thres < 0) || - (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) { - dev_dbg(ccdc_cfg.dev, - "Invalid value of median filter threshold\n"); - return -EINVAL; - } - - if (ccdcparam->data_sz < CCDC_DATA_16BITS || - ccdcparam->data_sz > CCDC_DATA_8BITS) { - dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n"); - return -EINVAL; - } - - if (ccdcparam->alaw.enable) { - if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 || - ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) { - dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n"); - return -EINVAL; - } - } - - if (ccdcparam->blk_clamp.b_clamp_enable) { - if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS || - ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) { - dev_dbg(ccdc_cfg.dev, - "Invalid value of sample pixel\n"); - return -EINVAL; - } - if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES || - ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) { - dev_dbg(ccdc_cfg.dev, - "Invalid value of sample lines\n"); - return -EINVAL; - } - } - return 0; -} - -/* Parameter operations */ -static int ccdc_set_params(void __user *params) -{ - struct ccdc_config_params_raw ccdc_raw_params; - int x; - - /* only raw module parameters can be set through the IOCTL */ - if (ccdc_cfg.if_type != VPFE_RAW_BAYER) - return -EINVAL; - - x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); - if (x) { - dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n", - x); - return -EFAULT; - } - - if (!validate_ccdc_param(&ccdc_raw_params)) { - memcpy(&ccdc_cfg.bayer.config_params, - &ccdc_raw_params, - sizeof(ccdc_raw_params)); - return 0; - } - return -EINVAL; -} - /* This function will configure CCDC for YCbCr video capture */ static void ccdc_config_ycbcr(void) { @@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { .enable = ccdc_enable, .enable_out_to_sdram = ccdc_enable_output_to_sdram, .set_hw_if_params = ccdc_set_hw_if_params, - .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index 740fbc7a8c149172202c082576fdf159b6ba5bc7..3b2d8a9317b8d872c894cfa46defd4006096ac59 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c @@ -17,13 +17,9 @@ * This module is for configuring CCD controller of DM6446 VPFE to capture * Raw yuv or Bayer RGB data from a decoder. 
CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to - * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This - * module also allows application to configure individual - * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. - * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header - * files. The setparams() API is called by vpfe_capture driver - * to configure module parameters. This file is named DM644x so that other - * variants such DM6443 may be supported using the same module. + * pre-process the Raw Bayer RGB data, before writing it to SDRAM. + * This file is named DM644x so that other variants such DM6443 + * may be supported using the same module. * * TODO: Test Raw bayer parameter settings and bayer capture * Split module parameter structure to module specific ioctl structs @@ -216,96 +212,8 @@ static void ccdc_readregs(void) dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); } -static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) -{ - if (ccdcparam->alaw.enable) { - u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd); - u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); - - if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) || - (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) || - (max_gamma > max_data)) { - dev_dbg(ccdc_cfg.dev, "\nInvalid data line select"); - return -1; - } - } - return 0; -} - -static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) -{ - struct ccdc_config_params_raw *config_params = - &ccdc_cfg.bayer.config_params; - unsigned int *fpc_virtaddr = NULL; - unsigned int *fpc_physaddr = NULL; - - memcpy(config_params, raw_params, sizeof(*raw_params)); - /* - * allocate memory for fault pixel table and copy the user - * values to the table - */ - if (!config_params->fault_pxl.enable) - return 0; - - fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; - fpc_virtaddr = (unsigned int *)phys_to_virt( - (unsigned long)fpc_physaddr); - /* - * Allocate memory for FPC table if current - * FPC table buffer is not big enough to - * accommodate FPC Number requested - */ - if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { - if (fpc_physaddr != NULL) { - free_pages((unsigned long)fpc_virtaddr, - get_order - (config_params->fault_pxl.fp_num * - FP_NUM_BYTES)); - } - - /* Allocate memory for FPC table */ - fpc_virtaddr = - (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, - get_order(raw_params-> - fault_pxl.fp_num * - FP_NUM_BYTES)); - - if (fpc_virtaddr == NULL) { - dev_dbg(ccdc_cfg.dev, - "\nUnable to allocate memory for FPC"); - return -EFAULT; - } - fpc_physaddr = - (unsigned int *)virt_to_phys((void *)fpc_virtaddr); - } - - /* Copy number of fault pixels and FPC table */ - config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; - if (copy_from_user(fpc_virtaddr, - (void __user *)raw_params->fault_pxl.fpc_table_addr, - config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { - dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed"); - return -EFAULT; - } - config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr; - return 0; -} - static int ccdc_close(struct device *dev) { - struct ccdc_config_params_raw *config_params = - &ccdc_cfg.bayer.config_params; - unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; - - fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; - - if (fpc_physaddr != NULL) { - fpc_virtaddr = (unsigned int *) - 
phys_to_virt((unsigned long)fpc_physaddr); - free_pages((unsigned long)fpc_virtaddr, - get_order(config_params->fault_pxl.fp_num * - FP_NUM_BYTES)); - } return 0; } @@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void) vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); } -/* Parameter operations */ -static int ccdc_set_params(void __user *params) -{ - struct ccdc_config_params_raw ccdc_raw_params; - int x; - - if (ccdc_cfg.if_type != VPFE_RAW_BAYER) - return -EINVAL; - - x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); - if (x) { - dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n", - x); - return -EFAULT; - } - - if (!validate_ccdc_param(&ccdc_raw_params)) { - if (!ccdc_update_raw_params(&ccdc_raw_params)) - return 0; - } - return -EINVAL; -} - /* * ccdc_config_ycbcr() * This function will configure CCDC for YCbCr video capture @@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) regw(val, CCDC_BLKCMP); } -static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) -{ - u32 val; - - /* Initially disable FPC */ - val = CCDC_FPC_DISABLE; - regw(val, CCDC_FPC); - - if (!fpc->enable) - return; - - /* Configure Fault pixel if needed */ - regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n", - (fpc->fpc_table_addr)); - /* Write the FPC params with FPC disable */ - val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; - regw(val, CCDC_FPC); - - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); - /* read the FPC register */ - val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; - regw(val, CCDC_FPC); - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); -} - /* * ccdc_config_raw() * This function will configure CCDC for Raw capture mode @@ -569,9 +428,6 @@ static void ccdc_config_raw(void) /* Configure Black level compensation */ ccdc_config_black_compense(&config_params->blk_comp); - /* Configure Fault Pixel Correction */ - ccdc_config_fpc(&config_params->fault_pxl); - /* If data size is 8 bit then pack the data */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) @@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { .reset = ccdc_sbl_reset, .enable = ccdc_enable, .set_hw_if_params = ccdc_set_hw_if_params, - .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index e3fe3e0635aa87806abf4b15e4ba83264ec8b557..b1bf4a7e8eb7a846cb11b1d65a9d40f7d6a29267 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -280,45 +280,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev) } EXPORT_SYMBOL(vpfe_unregister_ccdc_device); -/* - * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings - */ -static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev, - struct v4l2_format *f) -{ - struct v4l2_rect image_win; - enum ccdc_buftype buf_type; - enum ccdc_frmfmt frm_fmt; - - memset(f, 0, sizeof(*f)); - f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; - ccdc_dev->hw_ops.get_image_window(&image_win); - f->fmt.pix.width = image_win.width; - f->fmt.pix.height = image_win.height; - f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length(); - f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * - f->fmt.pix.height; - buf_type = ccdc_dev->hw_ops.get_buftype(); - f->fmt.pix.pixelformat = 
ccdc_dev->hw_ops.get_pixel_format(); - frm_fmt = ccdc_dev->hw_ops.get_frame_format(); - if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) - f->fmt.pix.field = V4L2_FIELD_NONE; - else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { - if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) - f->fmt.pix.field = V4L2_FIELD_INTERLACED; - else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) - f->fmt.pix.field = V4L2_FIELD_SEQ_TB; - else { - v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n"); - return -EINVAL; - } - } else { - v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n"); - return -EINVAL; - } - return 0; -} - /* * vpfe_config_ccdc_image_format() * For a pix format, configure ccdc to setup the capture @@ -1697,59 +1658,6 @@ static int vpfe_s_selection(struct file *file, void *priv, return ret; } - -static long vpfe_param_handler(struct file *file, void *priv, - bool valid_prio, unsigned int cmd, void *param) -{ - struct vpfe_device *vpfe_dev = video_drvdata(file); - int ret; - - v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); - - if (vpfe_dev->started) { - /* only allowed if streaming is not started */ - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "device already started\n"); - return -EBUSY; - } - - ret = mutex_lock_interruptible(&vpfe_dev->lock); - if (ret) - return ret; - - switch (cmd) { - case VPFE_CMD_S_CCDC_RAW_PARAMS: - v4l2_warn(&vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); - if (ccdc_dev->hw_ops.set_params) { - ret = ccdc_dev->hw_ops.set_params(param); - if (ret) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Error setting parameters in CCDC\n"); - goto unlock_out; - } - ret = vpfe_get_ccdc_image_format(vpfe_dev, - &vpfe_dev->fmt); - if (ret < 0) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Invalid image format at CCDC\n"); - goto unlock_out; - } - } else { - ret = -EINVAL; - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); - } - break; - default: - ret = -ENOTTY; - } -unlock_out: - mutex_unlock(&vpfe_dev->lock); - return ret; -} - - /* vpfe capture ioctl operations */ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { .vidioc_querycap = vpfe_querycap, @@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { .vidioc_cropcap = vpfe_cropcap, .vidioc_g_selection = vpfe_g_selection, .vidioc_s_selection = vpfe_s_selection, - .vidioc_default = vpfe_param_handler, }; static struct vpfe_device *vpfe_initialize(void) diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index d78580f9e431f669797ce816a928f579cb956793..4be6554c56c5245fc75eb99c22ba2be9d386a33f 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1719,7 +1719,6 @@ static __init int vpif_probe(struct platform_device *pdev) */ static int vpif_remove(struct platform_device *device) { - struct common_obj *common; struct channel_obj *ch; int i; @@ -1730,7 +1729,6 @@ static int vpif_remove(struct platform_device *device) for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; - common = &ch->common[VPIF_VIDEO_INDEX]; /* Unregister video device */ video_unregister_device(&ch->video_dev); kfree(vpif_obj.dev[i]); diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index b5ac6ce626b3c664ab7ac9b1852ac327c6663721..bf982bf86542accce57aa97efad2edaeb634e832 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ 
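
For reference on the vpfe_capture removal above: VPFE_CMD_S_CCDC_RAW_PARAMS reached the driver through the .vidioc_default hook of v4l2_ioctl_ops, which the V4L2 core invokes for ioctls it does not handle itself, so dropping vpfe_param_handler together with the ccdc .set_params op removes that driver-private interface. A rough sketch of the hook shape (illustration only; my_private_handler and MY_PRIVATE_CMD are placeholders, not driver symbols):

#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

/* Placeholder names; only the hook shape mirrors the removed code. */
#define MY_PRIVATE_CMD	_IOW('V', BASE_VIDIOC_PRIVATE, int)

static long my_private_handler(struct file *file, void *fh,
			       bool valid_prio, unsigned int cmd, void *arg)
{
	if (cmd != MY_PRIVATE_CMD)
		return -ENOTTY;

	/* ... validate and apply *arg under the driver lock ... */
	return 0;
}

static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_default	= my_private_handler,	/* unhandled ioctls land here */
};
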
b/drivers/media/platform/davinci/vpif_display.c @@ -1339,7 +1339,6 @@ static __init int vpif_probe(struct platform_device *pdev) */ static int vpif_remove(struct platform_device *device) { - struct common_obj *common; struct channel_obj *ch; int i; @@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device) for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; - common = &ch->common[VPIF_VIDEO_INDEX]; /* Unregister video device */ video_unregister_device(&ch->video_dev); kfree(vpif_obj.dev[i]); diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 92c4e18263566888abd32837f656e4fe7105fbf7..45a553d4f5b2fb6d27c6e0e0efc2eb8c1edade39 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -16,7 +16,6 @@ #include #include -#include #include